Skip to content

Commit

Permalink
[Cherry-Pick] Fit Paddle 2.6 compatibility (#1823)
Browse files Browse the repository at this point in the history
  • Loading branch information
RachelXu7 authored Dec 28, 2023
1 parent dcf79e9 commit 521157e
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 6 deletions.
19 changes: 13 additions & 6 deletions paddleslim/quant/advanced/gptq.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,9 @@ def fasterquant(self,
H = self.hessian
del self.hessian
dead = paddle.where(paddle.diag(H) == 0)
H[dead, dead] = 1
W[:, dead] = 0
if dead[0].shape[0] != 0:
H[dead, dead] = 1
W[:, dead] = 0
del dead
if actorder:
perm = paddle.argsort(paddle.diag(H), descending=True)
Expand All @@ -122,9 +123,15 @@ def fasterquant(self,
damp = percdamp * paddle.mean(paddle.diag(H))
diag = paddle.arange(self.columns)
H[diag, diag] += damp

H = paddle.inverse(H)
H = paddle.linalg.cholesky(H, upper=True)
try:
H = paddle.inverse(H)
H = paddle.linalg.cholesky(H, upper=True)
except:
print('We skip GPTQ this layer now.')
print(
'If you want GPTQ this layer, please try setting damp_percent larger or increasing the number of samples.'
)
return
Hinv = H

for i1 in range(0, self.columns, blocksize):
Expand Down Expand Up @@ -182,4 +189,4 @@ def fasterquant(self,

self.quantized = True
del H, Q, Hinv, W, Losses
paddle.device.cuda.empty_cache()
paddle.device.cuda.empty_cache()
3 changes: 3 additions & 0 deletions paddleslim/quant/advanced/piecewise_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,8 @@ def search(self, layer_name, sampled_input, act_abs_max, weight):
mask_for_search = paddle.where(labels == centroids.argsort()[i],
1., 0.)
mask_for_ones = paddle.where(mask_for_search == 0., 1., 0.)
mask_for_search = mask_for_search.cast(dtype)
mask_for_ones = mask_for_ones.cast(dtype)

while alpha <= alpha_max:
if alpha < 1:
Expand Down Expand Up @@ -125,6 +127,7 @@ def search(self, layer_name, sampled_input, act_abs_max, weight):
if smooth_scale_out is not None:
mask_for_ones_new = paddle.where(
smooth_scale_out == 0., 1., 0.)
mask_for_ones_new = mask_for_ones_new.cast(dtype)
mask_for_ones *= mask_for_ones_new
smooth_scale_ = smooth_scale_out + smooth_scale
smooth_scale_tmp = smooth_scale_ + mask_for_ones
Expand Down

0 comments on commit 521157e

Please sign in to comment.