{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"SurgVLP","owner":"CAMMA-public","isFork":false,"description":"Learning multi-modal representations by watching hundreds of surgical video lectures","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":26,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-20T08:14:10.647Z"}},{"type":"Public","name":"ScalingSurgicalSSL","owner":"CAMMA-public","isFork":true,"description":"Official repository for \"Jumpstarting Surgical Computer Vision\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":8,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-06T09:47:47.558Z"}},{"type":"Public","name":"SSG-VQA","owner":"CAMMA-public","isFork":false,"description":"SSG-VQA is a Visual Question Answering (VQA) dataset on laparoscopic videos providing diverse, geometrically grounded, unbiased and surgical action-oriented queries generated using scene graphs.","allTopics":["scene-graph","vqa-dataset","surgical-data-science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":28,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-25T08:41:32.805Z"}},{"type":"Public","name":"SelfPose3d","owner":"CAMMA-public","isFork":false,"description":"Official code for \"SelfPose3d: Self-Supervised Multi-Person Multi-View 3d Pose Estimation\"","allTopics":["human-pose-estimation","multi-view-learning","self-supervised-learning","3d-human-shape-and-pose-estimation","multi-view-multi-person-3d-human-pose-estimation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":22,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-22T15:37:29.154Z"}},{"type":"Public","name":"SurgLatentGraph","owner":"CAMMA-public","isFork":false,"description":"This repository contains the code associated with our 2023 TMI paper \"Latent Graph Representations for Critical View of Safety Assessment\" and our MICCAI 2023 paper \"Encoding Surgical Videos as Spatiotemporal Graphs for Object and Anatomy-Driven Reasoning\".","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":21,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-21T18:16:16.603Z"}},{"type":"Public","name":"MultiBypass140","owner":"CAMMA-public","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-08T19:36:50.717Z"}},{"type":"Public","name":"cholectrack20","owner":"CAMMA-public","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-31T12:41:12.725Z"}},{"type":"Public template","name":"ivtmetrics","owner":"CAMMA-public","isFork":false,"description":"A Python evaluation metrics package for surgical action triplet 
recognition","allTopics":["recognition","detection","average-precision","action-triplet","python","machine-learning","metrics","object-detection"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":12,"forksCount":2,"license":"BSD 2-Clause \"Simplified\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T06:27:19.317Z"}},{"type":"Public","name":"mcit-ig","owner":"CAMMA-public","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":2,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-04T18:10:35.616Z"}},{"type":"Public","name":"rendezvous","owner":"CAMMA-public","isFork":false,"description":"A transformer-inspired neural network for surgical action triplet recognition from laparoscopic videos.","allTopics":["python","deep-learning","tensorflow","python3","pytorch","transformer","attention-mechanism","action-recognition","weakly-supervised-learning","state-of-the-art","tensorflow2","laparoscopy","cholect45","cholect50","action-triplet"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":22,"forksCount":8,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-30T10:03:51.496Z"}},{"type":"Public","name":"cholect50","owner":"CAMMA-public","isFork":false,"description":"A repository for surgical action triplet dataset. Data are videos of laparoscopic cholecystectomy that have been annotated with <instrument, verb, target> labels for every surgical fine-grained activity.","allTopics":["python","data","machine-learning","recognition","localization","deep-learning","tensorflow","detection","pytorch","artificial-intelligence","action","datasets","endoscopy","laparoscopy","surgical-data-science","cholect45","cholect50","cholect40"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":36,"forksCount":4,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-07T17:42:49.476Z"}},{"type":"Public","name":"rendezvous-in-time","owner":"CAMMA-public","isFork":false,"description":"rendezvous-in-time","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":8,"forksCount":3,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-05T15:45:02.532Z"}},{"type":"Public","name":"SelfSupSurg","owner":"CAMMA-public","isFork":false,"description":"Official repository for \"Dissecting Self-Supervised Learning Methods for Surgical Computer Vision\"","allTopics":["endoscopic-vision","surgical-phase-recognition","surgical-data-science","surgical-scene-segmentation","surgical-computer-vision","laparascopic-cholecystectomy","deep-learning","semi-supervised-learning","transfer-learning","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":31,"forksCount":8,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-02T16:22:21.558Z"}},{"type":"Public","name":"out-of-body-detector","owner":"CAMMA-public","isFork":false,"description":"Application for classifying out-of-body images in endoscopic 
videos","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-07T12:51:20.817Z"}},{"type":"Public","name":"tripnet","owner":"CAMMA-public","isFork":false,"description":"","allTopics":["action-recognition","action-triplet","deep-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":15,"forksCount":3,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-01T13:07:22.805Z"}},{"type":"Public","name":"attention-tripnet","owner":"CAMMA-public","isFork":false,"description":"","allTopics":["python","attention-mechanism","action-recognition","action-triplet"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-01T13:07:08.930Z"}},{"type":"Public","name":"cholect45","owner":"CAMMA-public","isFork":false,"description":"Laparoscopic video dataset for surgical action triplet recognition","allTopics":["python","tensorflow","python3","pytorch","tensorflow2","laparoscopy","endoscopic-images","cholect45","cholect50","action-triplet","cholect40","dataset","action-recognition"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":37,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-08T15:07:35.691Z"}},{"type":"Public","name":"HPE-AdaptOR","owner":"CAMMA-public","isFork":false,"description":"Code for Unsupervised domain adaptation for clinician pose estimation and instance segmentation in the OR","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":4,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-07-07T05:30:03.518Z"}},{"type":"Public","name":"ORPose-Color","owner":"CAMMA-public","isFork":false,"description":"Inference demo for the MICCAI-2020 paper \"Self-supervision on Unlabelled OR Data for Multi-person 2D/3D Human Pose Estimation\"","allTopics":["colab","knowledge-distillation","data-disillation","operating-room","low-resolution-images","human-pose-estimation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-19T08:39:32.090Z"}},{"type":"Public","name":"cvs_annotator","owner":"CAMMA-public","isFork":false,"description":"Application for reviewing and annotating frames with critical view of safety (CVS) criteria and other relevant information","allTopics":["annotation-tool","surgical-data-science","cholecystectomy","medical-computer-vision"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":2,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-08-13T14:18:04.125Z"}},{"type":"Public","name":"MVOR","owner":"CAMMA-public","isFork":false,"description":"Multi-View Operating Room (MVOR) dataset consists of synchronized multi-view frames recorded by three RGB-D cameras in a hybrid OR during real clinical interventions. 
- ConvLSTM-Surgical-Tool-Tracker — Implementation of a weakly supervised surgical tool tracker: the temporal dependency in surgical video is modeled with a convolutional LSTM trained only on image-level labels to detect, localize, and track surgical instruments. Topics: tracking, localization, deep-learning, analysis, detection, lstm, convolutional-neural-network, weakly-supervised-learning, temporal-data, video-analysis, convlstm, surgical-tools, temporal-convolutional-network, endoscopic-images, surgical-video, surgical-instrument. 37 stars · 10 forks · Other license · updated 2020-09-01.
- ORPose-Depth — Inference demo and evaluation scripts for the MICCAI 2019 paper "Human Pose Estimation on Privacy-Preserving Low-Resolution Depth Images". Topics: colab, privacy-preserving, depth-images, operating-room, low-resolution-data, depthpose, human-pose-estimation. 22 stars · 4 forks · Other license · updated 2020-08-28.
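The <instrument, verb, target> annotation scheme behind cholect50/cholect45 can be pictured as a multi-hot vector over triplet classes per frame. Below is a minimal sketch of that idea; the triplet map and label vector are hypothetical illustrations, not the dataset's actual class list or file format (those are defined in the cholect50 repository).

```python
# Minimal sketch of the <instrument, verb, target> label idea used by CholecT50/CholecT45.
# TRIPLET_MAP and frame_label are hypothetical placeholders, not the dataset's real format.
import numpy as np

# Hypothetical mapping: triplet class id -> (instrument, verb, target)
TRIPLET_MAP = {
    0: ("grasper", "retract", "gallbladder"),
    1: ("hook", "dissect", "cystic_duct"),
    2: ("clipper", "clip", "cystic_artery"),
}

def decode_frame_labels(multi_hot: np.ndarray) -> list:
    """Return the (instrument, verb, target) triplets active in one annotated frame."""
    return [TRIPLET_MAP[i] for i in np.flatnonzero(multi_hot) if i in TRIPLET_MAP]

frame_label = np.array([1, 0, 1])  # a frame showing triplet classes 0 and 2
print(decode_frame_labels(frame_label))
# [('grasper', 'retract', 'gallbladder'), ('clipper', 'clip', 'cystic_artery')]
```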
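ivtmetrics packages average-precision evaluation for triplet recognition. The following is a generic sketch of the underlying metric computed with scikit-learn rather than the package's own API; the array shapes and class count are assumptions chosen for illustration.

```python
# Generic mean-average-precision sketch for multi-label triplet recognition.
# Uses scikit-learn for illustration; this is not ivtmetrics' API, and the
# class count and shapes below are assumptions.
import numpy as np
from sklearn.metrics import average_precision_score

num_classes = 5                                              # hypothetical number of triplet classes
targets = np.random.randint(0, 2, size=(100, num_classes))   # frame-level binary labels
scores = np.random.rand(100, num_classes)                    # model confidence per class

per_class_ap = average_precision_score(targets, scores, average=None)
mean_ap = np.nanmean(per_class_ap)                           # ignore classes with undefined AP
print(per_class_ap, mean_ap)
```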
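MVOR ships camera calibration parameters alongside its 2D/3D pose annotations. The sketch below shows standard pinhole projection of a 3D keypoint into image coordinates; the intrinsic and extrinsic values are hypothetical placeholders, not values read from MVOR's calibration files.

```python
# Standard pinhole projection of a 3D keypoint into pixel coordinates.
# K, R, and t are hypothetical placeholders, not MVOR's actual calibration.
import numpy as np

K = np.array([[538.0, 0.0, 315.0],   # fx, 0, cx
              [0.0, 538.0, 237.0],   # 0, fy, cy
              [0.0, 0.0, 1.0]])
R = np.eye(3)                        # rotation (world -> camera)
t = np.array([0.0, 0.0, 0.5])        # translation in metres

def project(point_3d: np.ndarray) -> np.ndarray:
    """Project a 3D point in world coordinates to 2D pixel coordinates."""
    cam = R @ point_3d + t           # world frame -> camera frame
    uvw = K @ cam                    # camera frame -> homogeneous pixels
    return uvw[:2] / uvw[2]          # perspective divide

print(project(np.array([0.1, -0.2, 2.0])))  # e.g. a wrist keypoint ~2 m from the camera
```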