Resolve issues with shared memory on new torch versions #15

Open · wants to merge 6 commits into master
bi_lstm_crf/app/predict.py (2 additions, 0 deletions; file mode 100644 → 100755)

@@ -76,6 +76,8 @@ def main():
                         help='the training device: "cuda:0", "cpu:0". It will be auto-detected by default')

     args = parser.parse_args()
+    if args.sentence[0] == "[":
+        args.sentence = json.loads(args.sentence)

     results = WordsTagger(args.model_dir, args.device)([args.sentence])
     print(args.sentence)
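The two added lines let the sentence argument be given either as a plain string or as a JSON-encoded token list. A minimal standalone sketch of the same check (the helper name and sample inputs are illustrative, not part of the PR):

    import json

    def parse_sentence(raw):
        # a JSON list such as '["New", "York"]' is decoded into a Python list;
        # anything else stays a plain string
        if raw and raw[0] == "[":
            return json.loads(raw)
        return raw

    print(parse_sentence('["New", "York", "City"]'))  # ['New', 'York', 'City']
    print(parse_sentence("New York City"))            # New York City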
bi_lstm_crf/app/train.py (6 additions, 3 deletions)

@@ -15,7 +15,7 @@ def __eval_model(model, device, dataloader, desc):
         losses, nums = zip(*[
             (model.loss(xb.to(device), yb.to(device)), len(xb))
             for xb, yb in tqdm(dataloader, desc=desc)])
-        return np.sum(np.multiply(losses, nums)) / np.sum(nums)
+        return torch.sum(torch.multiply(torch.tensor(losses), torch.tensor(nums))) / np.sum(nums)


def __save_loss(losses, file_path):
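With recent torch versions the per-batch losses returned by model.loss are tensors (possibly on the GPU), and feeding them through np.multiply can fail, so the reduction now stays in torch. A hedged sketch of the same length-weighted mean, with made-up values:

    import torch

    # per-batch mean losses and batch sizes, as __eval_model collects them
    losses = (torch.tensor(0.52), torch.tensor(0.47), torch.tensor(0.61))
    nums = (32, 32, 17)

    # weight each batch loss by its batch size, then normalize by total samples
    weighted = torch.stack(losses) * torch.tensor(nums, dtype=torch.float32)
    print(float(weighted.sum() / sum(nums)))

Here torch.stack is used to combine the scalar losses; the PR builds the tensor with torch.tensor(losses) instead.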
@@ -90,7 +90,7 @@ def train(args):
     print("training completed. test loss: {:.2f}".format(test_loss))


-def main():
+def main(argv=None):
     import argparse
     parser = argparse.ArgumentParser()
     parser.add_argument('corpus_dir', type=str, help="the corpus directory")

@@ -114,7 +114,10 @@ def main():
     parser.add_argument('--num_rnn_layers', type=int, default=1, help='the number of RNN layers')
     parser.add_argument('--rnn_type', type=str, default="lstm", help='RNN type, choice: "lstm", "gru"')

-    args = parser.parse_args()
+    if argv is None:
+        args = parser.parse_args()
+    else:
+        args = parser.parse_args(argv)

     train(args)
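Accepting an optional argv makes main callable from other Python code (tests, notebooks) instead of only from the command line; note that argparse already treats parse_args(None) as "read sys.argv", so the branch mainly documents the intent. A hypothetical programmatic call, with illustrative paths and values, assuming the layout bi_lstm_crf/app/train.py maps to the module imported below:

    from bi_lstm_crf.app.train import main

    # equivalent to: python train.py path/to/corpus --num_rnn_layers 2 --rnn_type gru
    main([
        "path/to/corpus",          # positional corpus_dir (illustrative path)
        "--num_rnn_layers", "2",
        "--rnn_type", "gru",
    ])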
bi_lstm_crf/app/utils.py (1 addition, 1 deletion)

@@ -21,7 +21,7 @@ def build_model(args, processor, load=True, verbose=False):
     # weights
     model_path = model_filepath(args.model_dir)
     if exists(model_path) and load:
-        state_dict = torch.load(model_path)
+        state_dict = torch.load(model_path, map_location=running_device(args.device))
         model.load_state_dict(state_dict)
         if verbose:
             print("load model weights from {}".format(model_path))
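Without map_location, torch.load tries to restore tensors to the device they were saved from, so a checkpoint written on a CUDA machine fails to load on a CPU-only one. A minimal sketch of device-aware loading (the checkpoint path is illustrative):

    import torch

    # remap the stored tensors onto whatever device is actually available
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    state_dict = torch.load("model.pth", map_location=device)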
bi_lstm_crf/model/model.py (1 addition, 1 deletion)

@@ -25,7 +25,7 @@ def __build_features(self, sentences):
         sorted_seq_length, perm_idx = seq_length.sort(descending=True)
         embeds = embeds[perm_idx, :]

-        pack_sequence = pack_padded_sequence(embeds, lengths=sorted_seq_length, batch_first=True)
+        pack_sequence = pack_padded_sequence(embeds, lengths=sorted_seq_length.cpu(), batch_first=True)
         packed_output, _ = self.rnn(pack_sequence)
         lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)
         _, unperm_idx = perm_idx.sort()
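Recent PyTorch releases require the lengths passed to pack_padded_sequence to live on the CPU; a CUDA lengths tensor raises a RuntimeError, which the added .cpu() call avoids. A self-contained sketch with made-up shapes:

    import torch
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    embeds = torch.randn(3, 5, 8)        # (batch, max_len, embedding_dim)
    lengths = torch.tensor([5, 3, 2])    # sorted descending, kept on the CPU
    rnn = torch.nn.LSTM(8, 16, batch_first=True)

    packed = pack_padded_sequence(embeds, lengths=lengths.cpu(), batch_first=True)
    packed_out, _ = rnn(packed)
    out, _ = pad_packed_sequence(packed_out, batch_first=True)
    print(out.shape)                     # torch.Size([3, 5, 16])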
requirements.txt (new file: 2 additions)

@@ -0,0 +1,2 @@
+torch >= 1.13.0
+tqdm >= 4.64.1