Skip to content

Commit

Permalink
Merge branch 'release/0.3.2'
Browse files Browse the repository at this point in the history
  • Loading branch information
emfomy committed Aug 10, 2022
2 parents 1db2d4d + 9abc421 commit f9c2055
Show file tree
Hide file tree
Showing 5 changed files with 18 additions and 9 deletions.
4 changes: 4 additions & 0 deletions .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,7 @@ disable =
[FORMAT]

max-line-length = 128

[TYPECHECK]

generated-members=torch.device
4 changes: 2 additions & 2 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -113,8 +113,8 @@ Models
Model Usage
^^^^^^^^^^^

| You may use our model directly from the HuggingFace's transformers library
| 您可直接透過 HuggingFace's transformers 套件使用我們的模型
| You may use our model directly from HuggingFace's transformers library.
| 您可直接透過 HuggingFace's transformers 套件使用我們的模型
.. code-block:: bash
Expand Down
2 changes: 1 addition & 1 deletion ckip_transformers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
__copyright__ = "2021 CKIP Lab"

__title__ = "CKIP Transformers"
__version__ = "0.3.1"
__version__ = "0.3.2"
__description__ = "CKIP Transformers"
__license__ = "GPL-3.0"

Expand Down
6 changes: 3 additions & 3 deletions ckip_transformers/nlp/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class CkipWordSegmenter(CkipTokenClassification):
The pretrained model name provided by CKIP Transformers.
model_name : ``str`` *optional*, overwrites **model**
The custom pretrained model name (e.g. ``'ckiplab/bert-base-chinese-ws'``).
device : ``int``, *optional*, defaults to -1
device : ``int`` or ``torch.device``, *optional*, defaults to -1
Device ordinal for CPU/GPU supports.
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.
"""
Expand Down Expand Up @@ -132,7 +132,7 @@ class CkipPosTagger(CkipTokenClassification):
The pretrained model name provided by CKIP Transformers.
model_name : ``str`` *optional*, overwrites **model**
The custom pretrained model name (e.g. ``'ckiplab/bert-base-chinese-pos'``).
device : ``int``, *optional*, defaults to -1
device : ``int`` or ``torch.device``, *optional*, defaults to -1
Device ordinal for CPU/GPU supports.
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.
"""
Expand Down Expand Up @@ -222,7 +222,7 @@ class CkipNerChunker(CkipTokenClassification):
The pretrained model name provided by CKIP Transformers.
model_name : ``str`` *optional*, overwrites **model**
The custom pretrained model name (e.g. ``'ckiplab/bert-base-chinese-ner'``).
device : ``int``, *optional*, defaults to -1
device : ``int`` or ``torch.device``, *optional*, defaults to -1
Device ordinal for CPU/GPU supports.
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.
"""
Expand Down
11 changes: 8 additions & 3 deletions ckip_transformers/nlp/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ class CkipTokenClassification(metaclass=ABCMeta):
The pretrained model name (e.g. ``'ckiplab/bert-base-chinese-ws'``).
tokenizer_name : ``str``, *optional*, defaults to **model_name**
The pretrained tokenizer name (e.g. ``'bert-base-chinese'``).
device : ``int``, *optional*, defaults to -1
device : ``int`` or ``torch.device``, *optional*, defaults to -1
Device ordinal for CPU/GPU supports.
Setting this to -1 will leverage CPU, a positive will run the model on the associated CUDA device id.
"""
def __init__(
    self,
    model_name: str,
    tokenizer_name: Optional[str] = None,
    *,
    device: Union[int, torch.device] = -1,
):
    """Load the pretrained model and tokenizer and move the model to *device*.

    Parameters
    ----------
    model_name : ``str``
        The pretrained model name (e.g. ``'ckiplab/bert-base-chinese-ws'``).
    tokenizer_name : ``str``, *optional*, defaults to **model_name**
        The pretrained tokenizer name (e.g. ``'bert-base-chinese'``).
    device : ``int`` or ``torch.device``, *optional*, defaults to -1
        Device for CPU/GPU support. A negative ``int`` selects the CPU; a
        non-negative ``int`` selects the corresponding CUDA device id; a
        ``torch.device`` is used as given.
    """
    self.model = AutoModelForTokenClassification.from_pretrained(model_name)
    self.tokenizer = BertTokenizerFast.from_pretrained(tokenizer_name or model_name)

    # Allow passing a customized torch.device directly; otherwise map the
    # integer ordinal onto a device (negative -> "cpu", n >= 0 -> "cuda:n").
    if isinstance(device, torch.device):
        self.device = device
    else:
        self.device = torch.device("cpu" if device < 0 else f"cuda:{device}")

    self.model.to(self.device)

########################################################################################################################
Expand Down

0 comments on commit f9c2055

Please sign in to comment.