
Tech giants form AI group to counter Nvidia with new interconnect standard

Abstract image of a data center with a flowchart. (credit: Getty Images)

On Thursday, several major tech companies, including Google, Intel, Microsoft, Meta, AMD, Hewlett Packard Enterprise, Cisco, and Broadcom, announced the formation of the Ultra Accelerator Link (UALink) Promoter Group to develop a new interconnect standard for AI accelerator chips in data centers. The group aims to create an alternative to Nvidia's proprietary NVLink interconnect technology, which links together multiple servers that power today's AI applications like ChatGPT.

The beating heart of AI these days lies in GPUs, which can perform massive numbers of matrix multiplications—necessary for running neural network architectures—in parallel. But one GPU often isn't enough for complex AI systems. NVLink can connect multiple AI accelerator chips within a server or across multiple servers. These interconnects enable faster data transfer and communication between the accelerators, allowing them to work together more efficiently on complex tasks like training large AI models.
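To make that concrete, here is a minimal, illustrative sketch (not from the article; device counts and tensor shapes are hypothetical) of a matrix multiplication sharded across the accelerators in one server. Each chip computes its local share, and the cross-device reduction at the end is the kind of collective traffic an interconnect such as NVLink, or a future UALink fabric, has to carry.

```python
import functools

import jax
import jax.numpy as jnp

n_dev = jax.local_device_count()  # e.g., 8 accelerators in one server

# Activations are sharded across devices along the leading axis; the weight
# matrix is replicated to every device. Shapes here are hypothetical.
x_shards = jnp.ones((n_dev, 1024, 4096))
w_replica = jnp.broadcast_to(jnp.ones((4096, 4096)), (n_dev, 4096, 4096))

@functools.partial(jax.pmap, axis_name="devices")
def layer(x_shard, w):
    y = x_shard @ w  # local matrix multiplication on each accelerator
    # Cross-device all-reduce: this exchange is what rides over the
    # accelerator-to-accelerator interconnect.
    return jax.lax.pmean(y, axis_name="devices")

out = layer(x_shards, w_replica)
print(out.shape)  # (n_dev, 1024, 4096)
```

The faster and wider that chip-to-chip link is, the less time the accelerators spend waiting on each other during steps like the reduction above, which is why the companies involved care who defines the standard.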

This linkage is a key part of any modern AI data center system, and whoever controls the link standard can effectively dictate which hardware the tech companies will use. Along those lines, the UALink group seeks to establish an open standard that allows multiple companies to contribute and develop AI hardware advancements instead of being locked into Nvidia's proprietary ecosystem. This approach is similar to other open standards, such as Compute Express Link (CXL)—created by Intel in 2019—which provides high-speed, high-capacity connections between CPUs and devices or memory in data centers.


Source: Ars Technica
