{"payload":{"header_redesign_enabled":false,"results":[{"id":"66823715","archived":false,"color":"#3572A5","followers":6349,"has_funding_file":false,"hl_name":"intel-analytics/ipex-llm","hl_trunc_description":"Accelerate local LLM inference and finetuning (LLaMA, Mistral, ChatGLM, Qwen, Baichuan, Mixtral, Gemma, Phi, MiniCPM, etc.) on Intel CPU …","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":66823715,"name":"ipex-llm","owner_id":10941215,"owner_login":"intel-analytics","updated_at":"2024-08-06T11:29:33.743Z","has_issues":true}},"sponsorable":false,"topics":["gpu","transformers","pytorch","llm"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":11,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":74,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aintel-analytics%252Fipex-llm%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/intel-analytics/ipex-llm/star":{"post":"ftVwblX0fC0faVS8lw42OaCi80FtA86qesVj2Acx53U9d7nrX8whBfuNRVwGjnu_AlwdurvtyGopJNOQNNxQdA"},"/intel-analytics/ipex-llm/unstar":{"post":"DvTdnwdyJi3msO8kBu5wf-BcWFXc-EfDVmcAattg-JZdaaE-trQgQvdt4gwXxQmZ-3DxEsoOjRI2zm_gooZesQ"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"bswHWgkszhAsIa2T1KhFgqS2P1xXjE_VVkzvY6Uw4Cb2su6n_ZYf19PTa3HEVIqaY5HmQ85_3QhDksMzrp50dg"}}},"title":"Repository search results"}