diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d68217b43c2..587d04d3285f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,56 @@ This changelog goes through all the changes that have been made in each release without substantial changes to our git log; to see the highlights of what has been added to each release, please refer to the [blog](https://blog.gitea.io). +## [1.14.3](https://github.com/go-gitea/gitea/releases/tag/v1.14.3) - 2021-06-18 + +* SECURITY + * Encrypt migration credentials at rest (#15895) (#16187) + * Only check access tokens if they are likely to be tokens (#16164) (#16171) + * Add missing SameSite settings for the i_like_gitea cookie (#16037) (#16039) + * Fix setting of SameSite on cookies (#15989) (#15991) +* API + * Repository object only count releases as releases (#16184) (#16190) + * EditOrg respect RepoAdminChangeTeamAccess option (#16184) (#16190) + * Fix overly strict edit pr permissions (#15900) (#16081) +* BUGFIXES + * Run processors on whole of text (#16155) (#16185) + * Class `issue-keyword` is being incorrectly stripped off spans (#16163) (#16172) + * Fix language switch for install page (#16043) (#16128) + * Fix bug on getIssueIDsByRepoID (#16119) (#16124) + * Set self-adjusting deadline for connection writing (#16068) (#16123) + * Fix http path bug (#16117) (#16120) + * Fix data URI scramble (#16098) (#16118) + * Merge all deleteBranch as one function and also fix bug when delete branch don't close related PRs (#16067) (#16097) + * git migration: don't prompt interactively for clone credentials (#15902) (#16082) + * Fix case change in ownernames (#16045) (#16050) + * Don't manipulate input params in email notification (#16011) (#16033) + * Remove branch URL before IssueRefURL (#15968) (#15970) + * Fix layout of milestone view (#15927) (#15940) + * GitHub Migration, migrate draft releases too (#15884) (#15888) + * Close the gitrepo when deleting the repository (#15876) (#15887) + * Upgrade xorm to v1.1.0 (#15869) 
(#15885) + * Fix blame row height alignment (#15863) (#15883) + * Fix error message when saving generated LOCAL_ROOT_URL config (#15880) (#15882) + * Backport Fix LFS commit finder not working (#15856) (#15874) + * Stop calling WriteHeader in Write (#15862) (#15873) + * Add timeout to writing to responses (#15831) (#15872) + * Return go-get info on subdirs (#15642) (#15871) + * Restore PAM user autocreation functionality (#15825) (#15867) + * Fix truncate utf8 string (#15828) (#15854) + * Fix bound address/port for caddy's certmagic library (#15758) (#15848) + * Upgrade unrolled/render to v1.1.1 (#15845) (#15846) + * Queue manager FlushAll can loop rapidly - add delay (#15733) (#15840) + * Tagger can be empty, as can Commit and Author - tolerate this (#15835) (#15839) + * Set autocomplete off on branches selector (#15809) (#15833) + * Add missing error to Doctor log (#15813) (#15824) + * Move restore repo to internal router and invoke from command to avoid open the same db file or queues files (#15790) (#15816) +* ENHANCEMENTS + * Removable media support to snap package (#16136) (#16138) + * Move sans-serif fallback font higher than emoji fonts (#15855) (#15892) +* DOCKER + * Only write config in environment-to-ini if there are changes (#15861) (#15868) + * Only offer hostcertificates if they exist (#15849) (#15853) + ## [1.14.2](https://github.com/go-gitea/gitea/releases/tag/v1.14.2) - 2021-05-09 * API diff --git a/docs/content/doc/advanced/config-cheat-sheet.en-us.md b/docs/content/doc/advanced/config-cheat-sheet.en-us.md index cfe9f6cc9d44..4f84e2ac3326 100644 --- a/docs/content/doc/advanced/config-cheat-sheet.en-us.md +++ b/docs/content/doc/advanced/config-cheat-sheet.en-us.md @@ -94,10 +94,11 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`. 
- `REOPEN_KEYWORDS`: **reopen**, **reopens**, **reopened**: List of keywords used in Pull Request comments to automatically reopen a related issue - `DEFAULT_MERGE_MESSAGE_COMMITS_LIMIT`: **50**: In the default merge message for squash commits include at most this many commits. Set to `-1` to include all commits -- `DEFAULT_MERGE_MESSAGE_SIZE`: **5120**: In the default merge message for squash commits limit the size of the commit messages. Set to `-1` to have no limit. +- `DEFAULT_MERGE_MESSAGE_SIZE`: **5120**: In the default merge message for squash commits limit the size of the commit messages. Set to `-1` to have no limit. Only used if `POPULATE_SQUASH_COMMENT_WITH_COMMIT_MESSAGES` is `true`. - `DEFAULT_MERGE_MESSAGE_ALL_AUTHORS`: **false**: In the default merge message for squash commits walk all commits to include all authors in the Co-authored-by otherwise just use those in the limited list - `DEFAULT_MERGE_MESSAGE_MAX_APPROVERS`: **10**: In default merge messages limit the number of approvers listed as `Reviewed-by:`. Set to `-1` to include all. - `DEFAULT_MERGE_MESSAGE_OFFICIAL_APPROVERS_ONLY`: **true**: In default merge messages only include approvers who are officially allowed to review. +- `POPULATE_SQUASH_COMMENT_WITH_COMMIT_MESSAGES`: **false**: In default squash-merge messages include the commit message of all commits comprising the pull request. 
### Repository - Issue (`repository.issue`) diff --git a/go.mod b/go.mod index c28db7b8edb6..0ac321f0e012 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,8 @@ require ( github.com/couchbase/goutils v0.0.0-20210118111533-e33d3ffb5401 // indirect github.com/denisenkom/go-mssqldb v0.10.0 github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/djherbis/buffer v1.2.0 + github.com/djherbis/nio/v3 v3.0.1 github.com/dustin/go-humanize v1.0.0 github.com/editorconfig/editorconfig-core-go/v2 v2.4.2 github.com/emirpasic/gods v1.12.0 diff --git a/go.sum b/go.sum index 65a9f0b363fe..31c66d0c4a0f 100644 --- a/go.sum +++ b/go.sum @@ -244,6 +244,11 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o= +github.com/djherbis/buffer v1.2.0 h1:PH5Dd2ss0C7CRRhQCZ2u7MssF+No9ide8Ye71nPHcrQ= +github.com/djherbis/buffer v1.2.0/go.mod h1:fjnebbZjCUpPinBRD+TDwXSOeNQ7fPQWLfGQqiAiUyE= +github.com/djherbis/nio/v3 v3.0.1 h1:6wxhnuppteMa6RHA4L81Dq7ThkZH8SwnDzXDYy95vB4= +github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmWgZxOcmg= github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= diff --git a/integrations/api_repo_test.go b/integrations/api_repo_test.go index 2c68d38846e7..7052e74b018e 100644 --- a/integrations/api_repo_test.go +++ b/integrations/api_repo_test.go @@ -466,7 +466,7 @@ func 
TestAPIRepoTransfer(t *testing.T) { session := loginUser(t, user.Name) token := getTokenForLoggedInUser(t, session) repoName := "moveME" - repo := new(models.Repository) + apiRepo := new(api.Repository) req := NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/user/repos?token=%s", token), &api.CreateRepoOption{ Name: repoName, Description: "repo move around", @@ -475,12 +475,12 @@ func TestAPIRepoTransfer(t *testing.T) { AutoInit: true, }) resp := session.MakeRequest(t, req, http.StatusCreated) - DecodeJSON(t, resp, repo) + DecodeJSON(t, resp, apiRepo) //start testing for _, testCase := range testCases { user = models.AssertExistsAndLoadBean(t, &models.User{ID: testCase.ctxUserID}).(*models.User) - repo = models.AssertExistsAndLoadBean(t, &models.Repository{ID: repo.ID}).(*models.Repository) + repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: apiRepo.ID}).(*models.Repository) session = loginUser(t, user.Name) token = getTokenForLoggedInUser(t, session) req = NewRequestWithJSON(t, "POST", fmt.Sprintf("/api/v1/repos/%s/%s/transfer?token=%s", repo.OwnerName, repo.Name, token), &api.TransferRepoOption{ @@ -491,7 +491,7 @@ func TestAPIRepoTransfer(t *testing.T) { } //cleanup - repo = models.AssertExistsAndLoadBean(t, &models.Repository{ID: repo.ID}).(*models.Repository) + repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: apiRepo.ID}).(*models.Repository) _ = models.DeleteRepository(user, repo.OwnerID, repo.ID) } diff --git a/integrations/org_count_test.go b/integrations/org_count_test.go index 755ee3cee59f..20917dc17e0c 100644 --- a/integrations/org_count_test.go +++ b/integrations/org_count_test.go @@ -114,11 +114,12 @@ func doCheckOrgCounts(username string, orgCounts map[string]int, strict bool, ca Name: username, }).(*models.User) - user.GetOrganizations(&models.SearchOrganizationsOptions{All: true}) + orgs, err := models.GetOrgsByUserID(user.ID, true) + assert.NoError(t, err) calcOrgCounts := map[string]int{} - for _, org := range 
user.Orgs { + for _, org := range orgs { calcOrgCounts[org.LowerName] = org.NumRepos count, ok := canonicalCounts[org.LowerName] if ok { diff --git a/models/user.go b/models/user.go index 002c050651f1..599834142219 100644 --- a/models/user.go +++ b/models/user.go @@ -112,7 +112,6 @@ type User struct { LoginName string Type UserType OwnedOrgs []*User `xorm:"-"` - Orgs []*User `xorm:"-"` Repos []*Repository `xorm:"-"` Location string Website string @@ -603,58 +602,6 @@ func (u *User) GetOwnedOrganizations() (err error) { return err } -// GetOrganizations returns paginated organizations that user belongs to. -// TODO: does not respect All and show orgs you privately participate -func (u *User) GetOrganizations(opts *SearchOrganizationsOptions) error { - sess := x.NewSession() - defer sess.Close() - - schema, err := x.TableInfo(new(User)) - if err != nil { - return err - } - groupByCols := &strings.Builder{} - for _, col := range schema.Columns() { - fmt.Fprintf(groupByCols, "`%s`.%s,", schema.Name, col.Name) - } - groupByStr := groupByCols.String() - groupByStr = groupByStr[0 : len(groupByStr)-1] - - sess.Select("`user`.*, count(repo_id) as org_count"). - Table("user"). - Join("INNER", "org_user", "`org_user`.org_id=`user`.id"). - Join("LEFT", builder. - Select("id as repo_id, owner_id as repo_owner_id"). - From("repository"). - Where(accessibleRepositoryCondition(u)), "`repository`.repo_owner_id = `org_user`.org_id"). - And("`org_user`.uid=?", u.ID). - GroupBy(groupByStr) - if opts.PageSize != 0 { - sess = opts.setSessionPagination(sess) - } - type OrgCount struct { - User `xorm:"extends"` - OrgCount int - } - orgCounts := make([]*OrgCount, 0, 10) - - if err := sess. - Asc("`user`.name"). 
- Find(&orgCounts); err != nil { - return err - } - - orgs := make([]*User, len(orgCounts)) - for i, orgCount := range orgCounts { - orgCount.User.NumRepos = orgCount.OrgCount - orgs[i] = &orgCount.User - } - - u.Orgs = orgs - - return nil -} - // DisplayName returns full name if it's not empty, // returns username otherwise. func (u *User) DisplayName() string { diff --git a/modules/git/batch_reader.go b/modules/git/batch_reader.go index d6ee0ce8e04d..678b18470878 100644 --- a/modules/git/batch_reader.go +++ b/modules/git/batch_reader.go @@ -11,6 +11,9 @@ import ( "math" "strconv" "strings" + + "github.com/djherbis/buffer" + "github.com/djherbis/nio/v3" ) // WriteCloserError wraps an io.WriteCloser with an additional CloseWithError function @@ -42,7 +45,7 @@ func CatFileBatchCheck(repoPath string) (WriteCloserError, *bufio.Reader, func() } }() - // For simplicities sake we'll us a buffered reader to read from the cat-file --batch + // For simplicities sake we'll use a buffered reader to read from the cat-file --batch-check batchReader := bufio.NewReader(batchStdoutReader) return batchStdinWriter, batchReader, cancel @@ -53,7 +56,7 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) { // We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary. 
// so let's create a batch stdin and stdout batchStdinReader, batchStdinWriter := io.Pipe() - batchStdoutReader, batchStdoutWriter := io.Pipe() + batchStdoutReader, batchStdoutWriter := nio.Pipe(buffer.New(32 * 1024)) cancel := func() { _ = batchStdinReader.Close() _ = batchStdinWriter.Close() @@ -74,7 +77,7 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) { }() // For simplicities sake we'll us a buffered reader to read from the cat-file --batch - batchReader := bufio.NewReader(batchStdoutReader) + batchReader := bufio.NewReaderSize(batchStdoutReader, 32*1024) return batchStdinWriter, batchReader, cancel } @@ -84,22 +87,31 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) { // SP SP LF // sha is a 40byte not 20byte here func ReadBatchLine(rd *bufio.Reader) (sha []byte, typ string, size int64, err error) { - sha, err = rd.ReadBytes(' ') + typ, err = rd.ReadString('\n') if err != nil { return } - sha = sha[:len(sha)-1] - - typ, err = rd.ReadString('\n') - if err != nil { + if len(typ) == 1 { + typ, err = rd.ReadString('\n') + if err != nil { + return + } + } + idx := strings.IndexByte(typ, ' ') + if idx < 0 { + log("missing space typ: %s", typ) + err = ErrNotExist{ID: string(sha)} return } + sha = []byte(typ[:idx]) + typ = typ[idx+1:] - idx := strings.Index(typ, " ") + idx = strings.IndexByte(typ, ' ') if idx < 0 { err = ErrNotExist{ID: string(sha)} return } + sizeStr := typ[idx+1 : len(typ)-1] typ = typ[:idx] @@ -130,7 +142,7 @@ headerLoop: } // Discard the rest of the tag - discard := size - n + discard := size - n + 1 for discard > math.MaxInt32 { _, err := rd.Discard(math.MaxInt32) if err != nil { @@ -200,85 +212,42 @@ func To40ByteSHA(sha, out []byte) []byte { return out } -// ParseTreeLineSkipMode reads an entry from a tree in a cat-file --batch stream -// This simply skips the mode - saving a substantial amount of time and carefully avoids allocations - except where fnameBuf is too small. 
+// ParseTreeLine reads an entry from a tree in a cat-file --batch stream +// This carefully avoids allocations - except where fnameBuf is too small. // It is recommended therefore to pass in an fnameBuf large enough to avoid almost all allocations // // Each line is composed of: // SP NUL <20-byte SHA> // // We don't attempt to convert the 20-byte SHA to 40-byte SHA to save a lot of time -func ParseTreeLineSkipMode(rd *bufio.Reader, fnameBuf, shaBuf []byte) (fname, sha []byte, n int, err error) { +func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fname, sha []byte, n int, err error) { var readBytes []byte - // Skip the Mode - readBytes, err = rd.ReadSlice(' ') // NB: DOES NOT ALLOCATE SIMPLY RETURNS SLICE WITHIN READER BUFFER - if err != nil { - return - } - n += len(readBytes) - // Deal with the fname + // Read the Mode & fname readBytes, err = rd.ReadSlice('\x00') - copy(fnameBuf, readBytes) - if len(fnameBuf) > len(readBytes) { - fnameBuf = fnameBuf[:len(readBytes)] // cut the buf the correct size - } else { - fnameBuf = append(fnameBuf, readBytes[len(fnameBuf):]...) // extend the buf and copy in the missing bits - } - for err == bufio.ErrBufferFull { // Then we need to read more - readBytes, err = rd.ReadSlice('\x00') - fnameBuf = append(fnameBuf, readBytes...) // there is little point attempting to avoid allocations here so just extend - } - n += len(fnameBuf) if err != nil { return } - fnameBuf = fnameBuf[:len(fnameBuf)-1] // Drop the terminal NUL - fname = fnameBuf // set the returnable fname to the slice - - // Now deal with the 20-byte SHA - idx := 0 - for idx < 20 { - read := 0 - read, err = rd.Read(shaBuf[idx:20]) - n += read - if err != nil { - return - } - idx += read - } - sha = shaBuf - return -} - -// ParseTreeLine reads an entry from a tree in a cat-file --batch stream -// This carefully avoids allocations - except where fnameBuf is too small. 
-// It is recommended therefore to pass in an fnameBuf large enough to avoid almost all allocations -// -// Each line is composed of: -// SP NUL <20-byte SHA> -// -// We don't attempt to convert the 20-byte SHA to 40-byte SHA to save a lot of time -func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fname, sha []byte, n int, err error) { - var readBytes []byte + idx := bytes.IndexByte(readBytes, ' ') + if idx < 0 { + log("missing space in readBytes ParseTreeLine: %s", readBytes) - // Read the Mode - readBytes, err = rd.ReadSlice(' ') - if err != nil { + err = &ErrNotExist{} return } - n += len(readBytes) - copy(modeBuf, readBytes) - if len(modeBuf) > len(readBytes) { - modeBuf = modeBuf[:len(readBytes)] - } else { - modeBuf = append(modeBuf, readBytes[len(modeBuf):]...) + n += idx + 1 + copy(modeBuf, readBytes[:idx]) + if len(modeBuf) >= idx { + modeBuf = modeBuf[:idx] + } else { + modeBuf = append(modeBuf, readBytes[len(modeBuf):idx]...) } - mode = modeBuf[:len(modeBuf)-1] // Drop the SP + mode = modeBuf + + readBytes = readBytes[idx+1:] // Deal with the fname - readBytes, err = rd.ReadSlice('\x00') copy(fnameBuf, readBytes) if len(fnameBuf) > len(readBytes) { fnameBuf = fnameBuf[:len(readBytes)] @@ -297,7 +266,7 @@ func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fn fname = fnameBuf // Deal with the 20-byte SHA - idx := 0 + idx = 0 for idx < 20 { read := 0 read, err = rd.Read(shaBuf[idx:20]) diff --git a/modules/git/commit_info_nogogit.go b/modules/git/commit_info_nogogit.go index f34bef9f018c..2283510d9635 100644 --- a/modules/git/commit_info_nogogit.go +++ b/modules/git/commit_info_nogogit.go @@ -7,15 +7,11 @@ package git import ( - "bufio" - "bytes" "context" "fmt" "io" - "math" "path" "sort" - "strings" ) // GetCommitsInfo gets information of all commits that are corresponding to these entries @@ -43,21 +39,16 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath return nil, 
nil, err } - for i, found := range commits { - if err := cache.Put(commit.ID.String(), path.Join(treePath, unHitPaths[i]), found.ID.String()); err != nil { + for pth, found := range commits { + if err := cache.Put(commit.ID.String(), path.Join(treePath, pth), found.ID.String()); err != nil { return nil, nil, err } - revs[unHitPaths[i]] = found + revs[pth] = found } } } else { sort.Strings(entryPaths) - revs = map[string]*Commit{} - var foundCommits []*Commit - foundCommits, err = GetLastCommitForPaths(ctx, commit, treePath, entryPaths) - for i, found := range foundCommits { - revs[entryPaths[i]] = found - } + revs, err = GetLastCommitForPaths(ctx, commit, treePath, entryPaths) } if err != nil { return nil, nil, err @@ -86,6 +77,8 @@ func (tes Entries) GetCommitsInfo(ctx context.Context, commit *Commit, treePath subModuleFile := NewSubModuleFile(entryCommit, subModuleURL, entry.ID.String()) commitsInfo[i].SubModuleFile = subModuleFile } + } else { + log("missing commit for %s", entry.Name()) } } @@ -125,220 +118,24 @@ func getLastCommitForPathsByCache(ctx context.Context, commitID, treePath string } // GetLastCommitForPaths returns last commit information -func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string, paths []string) ([]*Commit, error) { +func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string, paths []string) (map[string]*Commit, error) { // We read backwards from the commit to obtain all of the commits - - // We'll do this by using rev-list to provide us with parent commits in order - revListReader, revListWriter := io.Pipe() - defer func() { - _ = revListWriter.Close() - _ = revListReader.Close() - }() - - go func() { - stderr := strings.Builder{} - err := NewCommand("rev-list", "--format=%T", commit.ID.String()).SetParentContext(ctx).RunInDirPipeline(commit.repo.Path, revListWriter, &stderr) - if err != nil { - _ = revListWriter.CloseWithError(ConcatenateError(err, (&stderr).String())) - } else { - _ = 
revListWriter.Close() - } - }() + revs, err := WalkGitLog(ctx, commit.repo, commit, treePath, paths...) + if err != nil { + return nil, err + } batchStdinWriter, batchReader, cancel := commit.repo.CatFileBatch() defer cancel() - mapsize := 4096 - if len(paths) > mapsize { - mapsize = len(paths) - } - - path2idx := make(map[string]int, mapsize) - for i, path := range paths { - path2idx[path] = i - } - - fnameBuf := make([]byte, 4096) - modeBuf := make([]byte, 40) - - allShaBuf := make([]byte, (len(paths)+1)*20) - shaBuf := make([]byte, 20) - tmpTreeID := make([]byte, 40) - - // commits is the returnable commits matching the paths provided - commits := make([]string, len(paths)) - // ids are the blob/tree ids for the paths - ids := make([][]byte, len(paths)) - - // We'll use a scanner for the revList because it's simpler than a bufio.Reader - scan := bufio.NewScanner(revListReader) -revListLoop: - for scan.Scan() { - // Get the next parent commit ID - commitID := scan.Text() - if !scan.Scan() { - break revListLoop - } - commitID = commitID[7:] - rootTreeID := scan.Text() - - // push the tree to the cat-file --batch process - _, err := batchStdinWriter.Write([]byte(rootTreeID + "\n")) - if err != nil { - return nil, err - } - - currentPath := "" - - // OK if the target tree path is "" and the "" is in the paths just set this now - if treePath == "" && paths[0] == "" { - // If this is the first time we see this set the id appropriate for this paths to this tree and set the last commit to curCommit - if len(ids[0]) == 0 { - ids[0] = []byte(rootTreeID) - commits[0] = string(commitID) - } else if bytes.Equal(ids[0], []byte(rootTreeID)) { - commits[0] = string(commitID) - } - } - - treeReadingLoop: - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - _, _, size, err := ReadBatchLine(batchReader) - if err != nil { - return nil, err - } - - // Handle trees - - // n is counter for file position in the tree file - var n int64 - - // Two options: 
currentPath is the targetTreepath - if treePath == currentPath { - // We are in the right directory - // Parse each tree line in turn. (don't care about mode here.) - for n < size { - fname, sha, count, err := ParseTreeLineSkipMode(batchReader, fnameBuf, shaBuf) - shaBuf = sha - if err != nil { - return nil, err - } - n += int64(count) - idx, ok := path2idx[string(fname)] - if ok { - // Now if this is the first time round set the initial Blob(ish) SHA ID and the commit - if len(ids[idx]) == 0 { - copy(allShaBuf[20*(idx+1):20*(idx+2)], shaBuf) - ids[idx] = allShaBuf[20*(idx+1) : 20*(idx+2)] - commits[idx] = string(commitID) - } else if bytes.Equal(ids[idx], shaBuf) { - commits[idx] = string(commitID) - } - } - // FIXME: is there any order to the way strings are emitted from cat-file? - // if there is - then we could skip once we've passed all of our data - } - if _, err := batchReader.Discard(1); err != nil { - return nil, err - } - - break treeReadingLoop - } - - var treeID []byte - - // We're in the wrong directory - // Find target directory in this directory - idx := len(currentPath) - if idx > 0 { - idx++ - } - target := strings.SplitN(treePath[idx:], "/", 2)[0] - - for n < size { - // Read each tree entry in turn - mode, fname, sha, count, err := ParseTreeLine(batchReader, modeBuf, fnameBuf, shaBuf) - if err != nil { - return nil, err - } - n += int64(count) - - // if we have found the target directory - if bytes.Equal(fname, []byte(target)) && bytes.Equal(mode, []byte("40000")) { - copy(tmpTreeID, sha) - treeID = tmpTreeID - break - } - } - - if n < size { - // Discard any remaining entries in the current tree - discard := size - n - for discard > math.MaxInt32 { - _, err := batchReader.Discard(math.MaxInt32) - if err != nil { - return nil, err - } - discard -= math.MaxInt32 - } - _, err := batchReader.Discard(int(discard)) - if err != nil { - return nil, err - } - } - if _, err := batchReader.Discard(1); err != nil { - return nil, err - } - - // if we haven't 
found a treeID for the target directory our search is over - if len(treeID) == 0 { - break treeReadingLoop - } - - // add the target to the current path - if idx > 0 { - currentPath += "/" - } - currentPath += target - - // if we've now found the current path check its sha id and commit status - if treePath == currentPath && paths[0] == "" { - if len(ids[0]) == 0 { - copy(allShaBuf[0:20], treeID) - ids[0] = allShaBuf[0:20] - commits[0] = string(commitID) - } else if bytes.Equal(ids[0], treeID) { - commits[0] = string(commitID) - } - } - treeID = To40ByteSHA(treeID, treeID) - _, err = batchStdinWriter.Write(treeID) - if err != nil { - return nil, err - } - _, err = batchStdinWriter.Write([]byte("\n")) - if err != nil { - return nil, err - } - } - } - if scan.Err() != nil { - return nil, scan.Err() - } - - commitsMap := make(map[string]*Commit, len(commits)) + commitsMap := map[string]*Commit{} commitsMap[commit.ID.String()] = commit - commitCommits := make([]*Commit, len(commits)) - for i, commitID := range commits { + commitCommits := map[string]*Commit{} + for path, commitID := range revs { c, ok := commitsMap[commitID] if ok { - commitCommits[i] = c + commitCommits[path] = c continue } @@ -364,8 +161,8 @@ revListLoop: if _, err := batchReader.Discard(1); err != nil { return nil, err } - commitCommits[i] = c + commitCommits[path] = c } - return commitCommits, scan.Err() + return commitCommits, nil } diff --git a/modules/git/last_commit_cache_nogogit.go b/modules/git/last_commit_cache_nogogit.go index 3cbb0cca32e0..84c8ee132c26 100644 --- a/modules/git/last_commit_cache_nogogit.go +++ b/modules/git/last_commit_cache_nogogit.go @@ -88,9 +88,8 @@ func (c *LastCommitCache) recursiveCache(ctx context.Context, commit *Commit, tr return err } - for i, entryCommit := range commits { - entry := entryPaths[i] - if err := c.Put(commit.ID.String(), path.Join(treePath, entryPaths[i]), entryCommit.ID.String()); err != nil { + for entry, entryCommit := range commits { + if err 
:= c.Put(commit.ID.String(), path.Join(treePath, entry), entryCommit.ID.String()); err != nil { return err } if entryMap[entry].IsDir() { diff --git a/modules/git/log_name_status.go b/modules/git/log_name_status.go new file mode 100644 index 000000000000..803d614d611a --- /dev/null +++ b/modules/git/log_name_status.go @@ -0,0 +1,398 @@ +// Copyright 2021 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package git + +import ( + "bufio" + "bytes" + "context" + "io" + "path" + "sort" + "strings" + + "github.com/djherbis/buffer" + "github.com/djherbis/nio/v3" +) + +// LogNameStatusRepo opens git log --raw in the provided repo and returns a stdin pipe, a stdout reader and cancel function +func LogNameStatusRepo(repository, head, treepath string, paths ...string) (*bufio.Reader, func()) { + // We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary. 
+ // so let's create a batch stdin and stdout + stdoutReader, stdoutWriter := nio.Pipe(buffer.New(32 * 1024)) + cancel := func() { + _ = stdoutReader.Close() + _ = stdoutWriter.Close() + } + + args := make([]string, 0, 8+len(paths)) + args = append(args, "log", "--name-status", "-c", "--format=commit%x00%H %P%x00", "--parents", "--no-renames", "-t", "-z", head, "--") + if len(paths) < 70 { + if treepath != "" { + args = append(args, treepath) + for _, pth := range paths { + if pth != "" { + args = append(args, path.Join(treepath, pth)) + } + } + } else { + for _, pth := range paths { + if pth != "" { + args = append(args, pth) + } + } + } + } else if treepath != "" { + args = append(args, treepath) + } + + go func() { + stderr := strings.Builder{} + err := NewCommand(args...).RunInDirFullPipeline(repository, stdoutWriter, &stderr, nil) + if err != nil { + _ = stdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String())) + } else { + _ = stdoutWriter.Close() + } + }() + + // For simplicities sake we'll use a buffered reader to read from the git log output + bufReader := bufio.NewReaderSize(stdoutReader, 32*1024) + + return bufReader, cancel +} + +// LogNameStatusRepoParser parses a git log raw output from LogNameStatusRepo +type LogNameStatusRepoParser struct { + treepath string + paths []string + next []byte + buffull bool + rd *bufio.Reader + cancel func() +} + +// NewLogNameStatusRepoParser returns a new parser for a git log raw output +func NewLogNameStatusRepoParser(repository, head, treepath string, paths ...string) *LogNameStatusRepoParser { + rd, cancel := LogNameStatusRepo(repository, head, treepath, paths...)
+ return &LogNameStatusRepoParser{ + treepath: treepath, + paths: paths, + rd: rd, + cancel: cancel, + } +} + +// LogNameStatusCommitData represents a commit artefact from git log raw +type LogNameStatusCommitData struct { + CommitID string + ParentIDs []string + Paths []bool +} + +// Next returns the next LogNameStatusCommitData +func (g *LogNameStatusRepoParser) Next(treepath string, paths2ids map[string]int, changed []bool, maxpathlen int) (*LogNameStatusCommitData, error) { + var err error + if g.next == nil || len(g.next) == 0 { + g.buffull = false + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err == io.EOF { + return nil, nil + } else { + return nil, err + } + } + } + + ret := LogNameStatusCommitData{} + if bytes.Equal(g.next, []byte("commit\000")) { + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err == io.EOF { + return nil, nil + } else { + return nil, err + } + } + } + + // Our "line" must look like: SP ( SP) * NUL + ret.CommitID = string(g.next[0:40]) + parents := string(g.next[41:]) + if g.buffull { + more, err := g.rd.ReadString('\x00') + if err != nil { + return nil, err + } + parents += more + } + parents = parents[:len(parents)-1] + ret.ParentIDs = strings.Split(parents, " ") + + // now read the next "line" + g.buffull = false + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err != io.EOF { + return nil, err + } + } + + if err == io.EOF || !(g.next[0] == '\n' || g.next[0] == '\000') { + return &ret, nil + } + + // Ok we have some changes. + // This line will look like: NL NUL + // + // Subsequent lines will not have the NL - so drop it here - g.buffull must also be false at this point too.
+ if g.next[0] == '\n' { + g.next = g.next[1:] + } else { + g.buffull = false + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err != io.EOF { + return nil, err + } + } + if g.next[0] == '\x00' { + g.buffull = false + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err != io.EOF { + return nil, err + } + } + } + } + + fnameBuf := make([]byte, 4096) + +diffloop: + for { + if err == io.EOF || bytes.Equal(g.next, []byte("commit\000")) { + return &ret, nil + } + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err == io.EOF { + return &ret, nil + } else { + return nil, err + } + } + copy(fnameBuf, g.next) + if len(fnameBuf) < len(g.next) { + fnameBuf = append(fnameBuf, g.next[len(fnameBuf):]...) + } else { + fnameBuf = fnameBuf[:len(g.next)] + } + if err != nil { + if err != bufio.ErrBufferFull { + return nil, err + } + more, err := g.rd.ReadBytes('\x00') + if err != nil { + return nil, err + } + fnameBuf = append(fnameBuf, more...) 
+ } + + // read the next line + g.buffull = false + g.next, err = g.rd.ReadSlice('\x00') + if err != nil { + if err == bufio.ErrBufferFull { + g.buffull = true + } else if err != io.EOF { + return nil, err + } + } + + if treepath != "" { + if !bytes.HasPrefix(fnameBuf, []byte(treepath)) { + fnameBuf = fnameBuf[:cap(fnameBuf)] + continue diffloop + } + } + fnameBuf = fnameBuf[len(treepath) : len(fnameBuf)-1] + if len(fnameBuf) > maxpathlen { + fnameBuf = fnameBuf[:cap(fnameBuf)] + continue diffloop + } + if len(fnameBuf) > 0 { + if len(treepath) > 0 { + if fnameBuf[0] != '/' || bytes.IndexByte(fnameBuf[1:], '/') >= 0 { + fnameBuf = fnameBuf[:cap(fnameBuf)] + continue diffloop + } + fnameBuf = fnameBuf[1:] + } else if bytes.IndexByte(fnameBuf, '/') >= 0 { + fnameBuf = fnameBuf[:cap(fnameBuf)] + continue diffloop + } + } + + idx, ok := paths2ids[string(fnameBuf)] + if !ok { + fnameBuf = fnameBuf[:cap(fnameBuf)] + continue diffloop + } + if ret.Paths == nil { + ret.Paths = changed + } + changed[idx] = true + } +} + +// Close closes the parser +func (g *LogNameStatusRepoParser) Close() { + g.cancel() +} + +// WalkGitLog walks the git log --name-status for the head commit in the provided treepath and files +func WalkGitLog(ctx context.Context, repo *Repository, head *Commit, treepath string, paths ...string) (map[string]string, error) { + tree, err := head.SubTree(treepath) + if err != nil { + return nil, err + } + + entries, err := tree.ListEntries() + if err != nil { + return nil, err + } + + if len(paths) == 0 { + paths = make([]string, 0, len(entries)+1) + paths = append(paths, "") + for _, entry := range entries { + paths = append(paths, entry.Name()) + } + } else { + sort.Strings(paths) + if paths[0] != "" { + paths = append([]string{""}, paths...) + } + // remove duplicates + for i := len(paths) - 1; i > 0; i-- { + if paths[i] == paths[i-1] { + paths = append(paths[:i-1], paths[i:]...) 
+ } + } + } + + path2idx := map[string]int{} + maxpathlen := len(treepath) + + for i := range paths { + path2idx[paths[i]] = i + pthlen := len(paths[i]) + len(treepath) + 1 + if pthlen > maxpathlen { + maxpathlen = pthlen + } + } + + g := NewLogNameStatusRepoParser(repo.Path, head.ID.String(), treepath, paths...) + defer g.Close() + + results := make([]string, len(paths)) + remaining := len(paths) + nextRestart := (len(paths) * 3) / 4 + if nextRestart > 70 { + nextRestart = 70 + } + lastEmptyParent := head.ID.String() + commitSinceLastEmptyParent := uint64(0) + commitSinceNextRestart := uint64(0) + parentRemaining := map[string]bool{} + + changed := make([]bool, len(paths)) + +heaploop: + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + current, err := g.Next(treepath, path2idx, changed, maxpathlen) + if err != nil { + g.Close() + return nil, err + } + if current == nil { + break heaploop + } + delete(parentRemaining, current.CommitID) + if current.Paths != nil { + for i, found := range current.Paths { + if !found { + continue + } + changed[i] = false + if results[i] == "" { + results[i] = current.CommitID + delete(path2idx, paths[i]) + remaining-- + if results[0] == "" { + results[0] = current.CommitID + delete(path2idx, "") + remaining-- + } + } + } + } + + if remaining <= 0 { + break heaploop + } + commitSinceLastEmptyParent++ + if len(parentRemaining) == 0 { + lastEmptyParent = current.CommitID + commitSinceLastEmptyParent = 0 + } + if remaining <= nextRestart { + commitSinceNextRestart++ + if 4*commitSinceNextRestart > 3*commitSinceLastEmptyParent { + g.Close() + remainingPaths := make([]string, 0, len(paths)) + for i, pth := range paths { + if results[i] == "" { + remainingPaths = append(remainingPaths, pth) + } + } + g = NewLogNameStatusRepoParser(repo.Path, lastEmptyParent, treepath, remainingPaths...) 
+ parentRemaining = map[string]bool{} + nextRestart = (remaining * 3) / 4 + continue heaploop + } + } + for _, parent := range current.ParentIDs { + parentRemaining[parent] = true + } + } + g.Close() + + resultsMap := map[string]string{} + for i, pth := range paths { + resultsMap[pth] = results[i] + } + + return resultsMap, nil +} diff --git a/modules/git/notes_nogogit.go b/modules/git/notes_nogogit.go index 2b927249954a..267087a86faf 100644 --- a/modules/git/notes_nogogit.go +++ b/modules/git/notes_nogogit.go @@ -68,7 +68,7 @@ func GetNote(ctx context.Context, repo *Repository, commitID string, note *Note) if err != nil { return err } - note.Commit = lastCommits[0] + note.Commit = lastCommits[path] return nil } diff --git a/modules/git/pipeline/lfs_nogogit.go b/modules/git/pipeline/lfs_nogogit.go index e618dd04b7a3..d3696fcda219 100644 --- a/modules/git/pipeline/lfs_nogogit.go +++ b/modules/git/pipeline/lfs_nogogit.go @@ -116,6 +116,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) { if err != nil { return nil, err } + if _, err := batchReader.Discard(1); err != nil { + return nil, err + } _, err := batchStdinWriter.Write([]byte(curCommit.Tree.ID.String() + "\n")) if err != nil { @@ -146,6 +149,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) { paths = append(paths, curPath+string(fname)+"/") } } + if _, err := batchReader.Discard(1); err != nil { + return nil, err + } if len(trees) > 0 { _, err := batchStdinWriter.Write(trees[len(trees)-1]) if err != nil { diff --git a/modules/git/repo_branch_gogit.go b/modules/git/repo_branch_gogit.go index b00253f6ffd6..e8386b2dbd98 100644 --- a/modules/git/repo_branch_gogit.go +++ b/modules/git/repo_branch_gogit.go @@ -13,6 +13,30 @@ import ( "github.com/go-git/go-git/v5/plumbing" ) +// IsObjectExist returns true if given reference exists in the repository. 
+func (repo *Repository) IsObjectExist(name string) bool { + if name == "" { + return false + } + + _, err := repo.gogitRepo.ResolveRevision(plumbing.Revision(name)) + + return err == nil +} + +// IsReferenceExist returns true if given reference exists in the repository. +func (repo *Repository) IsReferenceExist(name string) bool { + if name == "" { + return false + } + + reference, err := repo.gogitRepo.Reference(plumbing.ReferenceName(name), true) + if err != nil { + return false + } + return reference.Type() != plumbing.InvalidReference +} + // IsBranchExist returns true if given branch exists in current repository. func (repo *Repository) IsBranchExist(name string) bool { if name == "" { diff --git a/modules/git/repo_branch_nogogit.go b/modules/git/repo_branch_nogogit.go index 13ddcf06cf65..dd34e4889903 100644 --- a/modules/git/repo_branch_nogogit.go +++ b/modules/git/repo_branch_nogogit.go @@ -9,10 +9,28 @@ package git import ( "bufio" + "bytes" "io" "strings" ) +// IsObjectExist returns true if given reference exists in the repository. +func (repo *Repository) IsObjectExist(name string) bool { + if name == "" { + return false + } + + wr, rd, cancel := repo.CatFileBatchCheck() + defer cancel() + _, err := wr.Write([]byte(name + "\n")) + if err != nil { + log("Error writing to CatFileBatchCheck %v", err) + return false + } + sha, _, _, err := ReadBatchLine(rd) + return err == nil && bytes.HasPrefix(sha, []byte(strings.TrimSpace(name))) +} + // IsReferenceExist returns true if given reference exists in the repository. func (repo *Repository) IsReferenceExist(name string) bool { if name == "" { diff --git a/modules/git/repo_language_stats_nogogit.go b/modules/git/repo_language_stats_nogogit.go index abbf5e943ba4..46b084cf01e3 100644 --- a/modules/git/repo_language_stats_nogogit.go +++ b/modules/git/repo_language_stats_nogogit.go @@ -49,6 +49,9 @@ func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, err log("Unable to get commit for: %s. 
Err: %v", commitID, err) return nil, err } + if _, err = batchReader.Discard(1); err != nil { + return nil, err + } tree := commit.Tree diff --git a/modules/indexer/code/bleve.go b/modules/indexer/code/bleve.go index 17128052f491..600789a28409 100644 --- a/modules/indexer/code/bleve.go +++ b/modules/indexer/code/bleve.go @@ -216,6 +216,9 @@ func (b *BleveIndexer) addUpdate(batchWriter git.WriteCloserError, batchReader * return nil } + if _, err = batchReader.Discard(1); err != nil { + return err + } id := filenameIndexerID(repo.ID, update.Filename) return batch.Index(id, &RepoIndexerData{ RepoID: repo.ID, diff --git a/modules/indexer/code/elastic_search.go b/modules/indexer/code/elastic_search.go index 16d4a1821a2f..38a97ad888c0 100644 --- a/modules/indexer/code/elastic_search.go +++ b/modules/indexer/code/elastic_search.go @@ -215,6 +215,9 @@ func (b *ElasticSearchIndexer) addUpdate(batchWriter git.WriteCloserError, batch return nil, nil } + if _, err = batchReader.Discard(1); err != nil { + return nil, err + } id := filenameIndexerID(repo.ID, update.Filename) return []elastic.BulkableRequest{ diff --git a/modules/markup/html.go b/modules/markup/html.go index 2a83b8716e0f..edf860da4510 100644 --- a/modules/markup/html.go +++ b/modules/markup/html.go @@ -286,6 +286,7 @@ var tagCleaner = regexp.MustCompile(`<((?:/?\w+/\w+)|(?:/[\w ]+/)|(/?[hH][tT][mM var nulCleaner = strings.NewReplacer("\000", "") func postProcess(ctx *RenderContext, procs []processor, input io.Reader, output io.Writer) error { + defer ctx.Cancel() // FIXME: don't read all content to memory rawHTML, err := ioutil.ReadAll(input) if err != nil { @@ -996,6 +997,9 @@ func sha1CurrentPatternProcessor(ctx *RenderContext, node *html.Node) { start := 0 next := node.NextSibling + if ctx.ShaExistCache == nil { + ctx.ShaExistCache = make(map[string]bool) + } for node != nil && node != next && start < len(node.Data) { m := sha1CurrentPattern.FindStringSubmatchIndex(node.Data[start:]) if m == nil { @@ -1013,10 
+1017,28 @@ func sha1CurrentPatternProcessor(ctx *RenderContext, node *html.Node) { // as used by git and github for linking and thus we have to do similar. // Because of this, we check to make sure that a matched hash is actually // a commit in the repository before making it a link. - if _, err := git.NewCommand("rev-parse", "--verify", hash).RunInDirBytes(ctx.Metas["repoPath"]); err != nil { - if !strings.Contains(err.Error(), "fatal: Needed a single revision") { - log.Debug("sha1CurrentPatternProcessor git rev-parse: %v", err) + + // check cache first + exist, inCache := ctx.ShaExistCache[hash] + if !inCache { + if ctx.GitRepo == nil { + var err error + ctx.GitRepo, err = git.OpenRepository(ctx.Metas["repoPath"]) + if err != nil { + log.Error("unable to open repository: %s Error: %v", ctx.Metas["repoPath"], err) + return + } + ctx.AddCancel(func() { + ctx.GitRepo.Close() + ctx.GitRepo = nil + }) } + + exist = ctx.GitRepo.IsObjectExist(hash) + ctx.ShaExistCache[hash] = exist + } + + if !exist { start = m[3] continue } diff --git a/modules/markup/renderer.go b/modules/markup/renderer.go index 5d35bd5a6771..d60c8ad71066 100644 --- a/modules/markup/renderer.go +++ b/modules/markup/renderer.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/setting" ) @@ -35,13 +36,44 @@ func Init() { // RenderContext represents a render context type RenderContext struct { - Ctx context.Context - Filename string - Type string - IsWiki bool - URLPrefix string - Metas map[string]string - DefaultLink string + Ctx context.Context + Filename string + Type string + IsWiki bool + URLPrefix string + Metas map[string]string + DefaultLink string + GitRepo *git.Repository + ShaExistCache map[string]bool + cancelFn func() +} + +// Cancel runs any cleanup functions that have been registered for this Ctx +func (ctx *RenderContext) Cancel() { + if ctx == nil { + return + } + ctx.ShaExistCache = map[string]bool{} + if ctx.cancelFn == nil 
{ + return + } + ctx.cancelFn() +} + +// AddCancel adds the provided fn as a Cleanup for this Ctx +func (ctx *RenderContext) AddCancel(fn func()) { + if ctx == nil { + return + } + oldCancelFn := ctx.cancelFn + if oldCancelFn == nil { + ctx.cancelFn = fn + return + } + ctx.cancelFn = func() { + defer oldCancelFn() + fn() + } } // Renderer defines an interface for rendering markup file to HTML diff --git a/modules/setting/repository.go b/modules/setting/repository.go index a6fc73651a30..a7666895e1f3 100644 --- a/modules/setting/repository.go +++ b/modules/setting/repository.go @@ -78,6 +78,7 @@ var ( DefaultMergeMessageAllAuthors bool DefaultMergeMessageMaxApprovers int DefaultMergeMessageOfficialApproversOnly bool + PopulateSquashCommentWithCommitMessages bool } `ini:"repository.pull-request"` // Issue Setting @@ -199,6 +200,7 @@ var ( DefaultMergeMessageAllAuthors bool DefaultMergeMessageMaxApprovers int DefaultMergeMessageOfficialApproversOnly bool + PopulateSquashCommentWithCommitMessages bool }{ WorkInProgressPrefixes: []string{"WIP:", "[WIP]"}, // Same as GitHub. See @@ -210,6 +212,7 @@ var ( DefaultMergeMessageAllAuthors: false, DefaultMergeMessageMaxApprovers: 10, DefaultMergeMessageOfficialApproversOnly: true, + PopulateSquashCommentWithCommitMessages: false, }, // Issue settings diff --git a/options/locale/locale_de-DE.ini b/options/locale/locale_de-DE.ini index d26ac2606600..2cc2e7df78d0 100644 --- a/options/locale/locale_de-DE.ini +++ b/options/locale/locale_de-DE.ini @@ -91,8 +91,10 @@ loading=Laden… step1=Schritt 1: step2=Schritt 2: +error=Fehler error404=Die Seite, die du gerade versuchst aufzurufen, existiert entweder nicht oder du bist nicht berechtigt, diese anzusehen. +never=Niemals [error] occurred=Ein Fehler ist aufgetreten @@ -724,6 +726,7 @@ mirror_prune_desc=Entferne veraltete remote-tracking Referenzen mirror_interval=Spiegel-Intervall (gültige Zeiteinheiten sind 'h', 'm', 's'). 0 schaltet die automatische Synchronisierung aus. 
mirror_interval_invalid=Das Spiegel-Intervall ist ungültig. mirror_address=Klonen via URL +mirror_address_desc=Gib alle erforderlichen Anmeldedaten im Abschnitt "Authentifizierung" ein. mirror_address_url_invalid=Die angegebene URL ist ungültig. Achte darauf, alle URL-Komponenten korrekt zu maskieren. mirror_address_protocol_invalid=Die angegebene URL ist ungültig. Nur Pfade beginnend mit http(s):// oder git:// können gespiegelt werden. mirror_lfs=Großdatei-Speicher (LFS) @@ -731,6 +734,9 @@ mirror_lfs_desc=Mirroring von LFS-Dateien aktivieren. mirror_lfs_endpoint=LFS-Endpunkt mirror_lfs_endpoint_desc=Sync wird versuchen, die Klon-URL zu verwenden, um den LFS-Server zu bestimmen. Du kannst auch einen eigenen Endpunkt angeben, wenn die LFS-Dateien woanders gespeichert werden. mirror_last_synced=Zuletzt synchronisiert +mirror_password_placeholder=(unverändert) +mirror_password_blank_placeholder=(Nicht gesetzt) +mirror_password_help=Ändere den Benutzernamen, um ein gespeichertes Passwort zu löschen. watchers=Beobachter stargazers=Favorisiert von forks=Forks @@ -783,6 +789,7 @@ form.reach_limit_of_creation_n=Du hast bereits dein Limit von %d Repositories er form.name_reserved=Der Repository-Name „%s“ ist reserviert. form.name_pattern_not_allowed='%s' ist nicht erlaubt für Repository-Namen. +need_auth=Authentifizierung migrate_options=Migrationsoptionen migrate_service=Migrationsdienst migrate_options_mirror_helper=Dieses Repository wird ein Mirror sein @@ -816,11 +823,18 @@ migrated_from_fake=Migriert von %[1]s migrate.migrate=Migrieren von %s migrate.migrating=Migriere von %s ... migrate.migrating_failed=Migrieren von %s fehlgeschlagen. +migrate.migrating_failed.error=Fehler: %s migrate.github.description=Migriere Daten von Github.com oder Github Enterprise. migrate.git.description=Migriere oder spiegele git-Daten von Git-Services migrate.gitlab.description=Migriere Daten von GitLab.com oder einem selbst gehostetem gitlab Server. 
migrate.gitea.description=Migriere Daten von Gitea.com oder einem selbst gehostetem Gitea Server. migrate.gogs.description=Migriere Daten von notabug.org oder einem anderen, selbst gehosteten Gogs Server. +migrate.migrating_git=Git Daten werden migriert +migrate.migrating_milestones=Meilensteine werden migriert +migrate.migrating_labels=Labels werden migriert +migrate.migrating_releases=Releases werden migriert +migrate.migrating_issues=Issues werden migriert +migrate.migrating_pulls=Pull Requests werden migriert mirror_from=Mirror von forked_from=geforkt von @@ -1314,6 +1328,10 @@ pulls.manually_merged_as=Dieser Pull Request wurde manuell als Beginne den Titel mit %s um zu verhindern, dass der Pull Request versehentlich gemergt wird.` +pulls.cannot_merge_work_in_progress=Dieser Pull Request ist als Work in Progress markiert. +pulls.still_in_progress=Noch in Bearbeitung? +pulls.add_prefix=%s Präfix hinzufügen +pulls.remove_prefix=%s Präfix entfernen pulls.data_broken=Dieser Pull-Requests ist kaputt, da Fork-Informationen gelöscht wurden. pulls.files_conflicted=Dieser Pull-Request hat Änderungen, die im Widerspruch zum Ziel-Branch stehen. pulls.is_checking=Die Konfliktprüfung läuft noch. Bitte aktualisiere die Seite in wenigen Augenblicken. @@ -1539,6 +1557,13 @@ settings.hooks=Webhooks settings.githooks=Git-Hooks settings.basic_settings=Grundeinstellungen settings.mirror_settings=Mirror-Einstellungen +settings.mirror_settings.direction=Richtung +settings.mirror_settings.direction.pull=Pull +settings.mirror_settings.direction.push=Push +settings.mirror_settings.last_update=Letzte Aktualisierung +settings.mirror_settings.push_mirror.none=Keine Push-Mirrors konfiguriert +settings.mirror_settings.push_mirror.remote_url=URL zum Git-Remote-Repository +settings.mirror_settings.push_mirror.add=Push-Mirror hinzufügen settings.sync_mirror=Jetzt synchronisieren settings.mirror_sync_in_progress=Mirror-Synchronisierung wird zurzeit ausgeführt. Komm in ein paar Minuten zurück. 
settings.email_notifications.enable=E-Mail Benachrichtigungen aktivieren @@ -1604,6 +1629,7 @@ settings.transfer_form_title=Gib den Repository-Namen zur Bestätigung ein: settings.transfer_in_progress=Es gibt derzeit eine laufende Übertragung. Bitte brich diese ab, wenn du dieses Repository an einen anderen Benutzer übertragen möchtest. settings.transfer_notices_1=– Du wirst keinen Zugriff mehr haben, wenn der neue Besitzer ein individueller Benutzer ist. settings.transfer_notices_2=– Du wirst weiterhin Zugriff haben, wenn der neue Besitzer eine Organisation ist und du einer der Besitzer bist. +settings.transfer_notices_3=- Wenn das Repository privat ist und an einen einzelnen Benutzer übertragen wird, wird sichergestellt, dass der Benutzer mindestens Leserechte hat (und die Berechtigungen werden gegebenenfalls ändert). settings.transfer_owner=Neuer Besitzer settings.transfer_perform=Übertragung durchführen settings.transfer_started=Für dieses Repository wurde eine Übertragung eingeleitet und wartet nun auf die Bestätigung von "%s" diff --git a/options/locale/locale_es-ES.ini b/options/locale/locale_es-ES.ini index b5f3d7d3c6ec..2ece1992e619 100644 --- a/options/locale/locale_es-ES.ini +++ b/options/locale/locale_es-ES.ini @@ -91,8 +91,10 @@ loading=Cargando… step1=Paso 1: step2=Paso 2: +error=Error error404=La página a la que está intentando acceder o no existe o no está autorizado para verla. +never=Nunca [error] occurred=Se ha producido un error @@ -731,6 +733,7 @@ mirror_lfs_desc=Activar la reproducción de datos LFS. mirror_lfs_endpoint=Punto final de LFS mirror_lfs_endpoint_desc=Sync intentará usar la url del clon para determinar el servidor LFS. También puede especificar un punto final personalizado si los datos LFS del repositorio se almacenan en otro lugar. 
mirror_last_synced=Sincronizado por última vez +mirror_password_placeholder=(Sin cambios) watchers=Seguidores stargazers=Fans forks=Forks @@ -783,6 +786,7 @@ form.reach_limit_of_creation_n=Ya han alcanzado su límite de repositorios de %d form.name_reserved=El nombre de repositorio '%s' está reservado. form.name_pattern_not_allowed=El patrón '%s' no está permitido en un nombre de repositorio. +need_auth=Autorización migrate_options=Opciones de migración migrate_service=Servicio de Migración migrate_options_mirror_helper=Este repositorio será uno replicado @@ -1314,6 +1318,7 @@ pulls.manually_merged_as=El Pull Request se ha fusionado manualmente como Comience el título con %s para prevenir que el pull request se fusione accidentalmente.` +pulls.still_in_progress=¿Aún en curso? pulls.data_broken=Este pull request está rota debido a que falta información del fork. pulls.files_conflicted=Este pull request tiene cambios en conflicto con la rama de destino. pulls.is_checking=La comprobación de conflicto de fusión está en progreso. Inténtalo de nuevo en unos momentos. @@ -1539,6 +1544,11 @@ settings.hooks=Webhooks settings.githooks=Git Hooks settings.basic_settings=Configuración Básica settings.mirror_settings=Configuración de réplica +settings.mirror_settings.docs=Configure su proyecto para insertar y/o extraer automáticamente los cambios hacia/desde otro repositorio. Las ramas, etiquetas y commits se sincronizarán automáticamente. ¿Cómo replico los repositorios? +settings.mirror_settings.direction=Dirección +settings.mirror_settings.direction.pull=Pull +settings.mirror_settings.last_update=Última actualización +settings.mirror_settings.push_mirror.remote_url=URL del repositorio remoto de Git settings.sync_mirror=Sincronizar ahora settings.mirror_sync_in_progress=La sincronización del repositorio replicado está en curso. Vuelva a intentarlo más tarde. 
settings.email_notifications.enable=Habilitar las notificaciones por correo electrónico @@ -1604,6 +1614,7 @@ settings.transfer_form_title=Escriba el nombre del repositorio como confirmació settings.transfer_in_progress=Actualmente hay una transferencia en curso. Por favor, cancela si quieres transferir este repositorio a otro usuario. settings.transfer_notices_1=- Perderá el acceso al repositorio si lo transfiere a un usuario individual. settings.transfer_notices_2=- Mantendrá el acceso al repositorio si lo transfiere a una organización que usted (co-)posee. +settings.transfer_notices_3=- Si el repositorio es privado y se transfiere a un usuario individual, esta acción se asegura de que el usuario tenga al menos permisos de lectura (y cambie los permisos si es necesario). settings.transfer_owner=Nuevo Propietario settings.transfer_perform=Realizar transferencia settings.transfer_started=Este repositorio ha sido marcado para transferencia y espera confirmación de "%s" @@ -1974,6 +1985,10 @@ branch.restore=Restaurar rama '%s' branch.download=Descargar rama '%s' branch.included_desc=Esta rama forma parte de la predeterminada branch.included=Incluida +branch.create_new_branch=Crear rama desde la rama: +branch.confirm_create_branch=Crear rama +branch.new_branch=Crear nueva rama +branch.new_branch_from=Crear nueva rama desde '%s' tag.create_tag=Crear etiqueta %s tag.create_success=La etiqueta '%s' ha sido creada. diff --git a/options/locale/locale_fr-FR.ini b/options/locale/locale_fr-FR.ini index c93bf817bd4f..b96e0d56eb5b 100644 --- a/options/locale/locale_fr-FR.ini +++ b/options/locale/locale_fr-FR.ini @@ -1263,8 +1263,8 @@ issues.review.dismissed_label=Rejeté issues.review.left_comment=laisser un commentaire issues.review.content.empty=Vous devez laisser un commentaire indiquant le(s) changement(s) demandé(s). 
issues.review.reject=a requis les changements %s -issues.review.wait=a demandé une révision pour %s -issues.review.add_review_request=demande de révision de %s %s +issues.review.wait=a été sollicité pour une révision %s +issues.review.add_review_request=a demandé une révision de %s %s issues.review.remove_review_request=a supprimé la demande de révision pour %s %s issues.review.remove_review_request_self=a refusé la revue %s issues.review.pending=En attente diff --git a/options/locale/locale_ru-RU.ini b/options/locale/locale_ru-RU.ini index 0f6361d6f52e..a7e1dadbccd6 100644 --- a/options/locale/locale_ru-RU.ini +++ b/options/locale/locale_ru-RU.ini @@ -1995,7 +1995,7 @@ settings.visibility.public=Публичный settings.visibility.limited=Ограничено (Видно только для авторизованных пользователей) settings.visibility.limited_shortname=Ограничить settings.visibility.private=Частный (Видимый только для участников организации) -settings.visibility.private_shortname=Приватизировать +settings.visibility.private_shortname=Приватный settings.update_settings=Обновить настройки settings.update_setting_success=Настройки организации обновлены. diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index fd59289b16a3..1eba2d89a409 100644 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -91,8 +91,10 @@ loading=正在加载... 
step1=第一步: step2=第二步: +error=错误 error404=您正尝试访问的页面 不存在您尚未被授权 查看该页面。 +never=从不 [error] occurred=发生错误 @@ -724,6 +726,7 @@ mirror_prune_desc=删除过时的远程跟踪引用 mirror_interval=镜像间隔 (有效时间单位为 "h"、"m"、"s")。0将禁用自动同步。 mirror_interval_invalid=镜像间隔无效。 mirror_address=从URL克隆 +mirror_address_desc=在授权框中输入必要的凭据。 mirror_address_url_invalid=URL无效。请检查您所输入的URL是否正确。 mirror_address_protocol_invalid=提供的 url 无效。只能从 http(s):// 或 git:// 位置进行镜像。 mirror_lfs=大文件存储 (LFS) @@ -786,6 +789,7 @@ form.reach_limit_of_creation_n=你已经达到了 %d 个仓库的上限。 form.name_reserved=仓库名称 '%s' 是被保留的。 form.name_pattern_not_allowed=仓库名称中不允许使用模式 "%s"。 +need_auth=授权 migrate_options=迁移选项 migrate_service=迁移服务 migrate_options_mirror_helper=该仓库将是一个 镜像 @@ -819,11 +823,19 @@ migrated_from_fake=从 %[1]s 迁移成功 migrate.migrate=从 %s 迁移 migrate.migrating=正在从 %s 迁移... migrate.migrating_failed=从 %s 迁移失败。 +migrate.migrating_failed.error=错误:%s migrate.github.description=从 Github.com 或者 Github Enterprise 迁移数据 migrate.git.description=从 Git 迁移数据 migrate.gitlab.description=从 GitLab.com 或者 自部署 GitLab 迁移数据 migrate.gitea.description=从 Gitea.com 或 自托管 Gitea 服务器迁移数据。 migrate.gogs.description=正从 notabug.org 或其他自托管 Gogs 服务器迁移数据。 +migrate.migrating_git=迁移Git数据 +migrate.migrating_topics=迁移主题 +migrate.migrating_milestones=迁移里程碑 +migrate.migrating_labels=迁移标签 +migrate.migrating_releases=迁移发布 +migrate.migrating_issues=迁移工单 +migrate.migrating_pulls=迁移合并请求 mirror_from=镜像自地址 forked_from=派生自 @@ -1546,6 +1558,15 @@ settings.hooks=Web 钩子 settings.githooks=管理 Git 钩子 settings.basic_settings=基本设置 settings.mirror_settings=镜像设置 +settings.mirror_settings.docs=将你的项目设置成自动从其它存储库推送或拉取变更。分支、标签以及提交将会自动同步。如何镜像存储库? 
+settings.mirror_settings.mirrored_repository=镜像库 +settings.mirror_settings.direction=方向 +settings.mirror_settings.direction.pull=拉取 +settings.mirror_settings.direction.push=推送 +settings.mirror_settings.last_update=最后更新 +settings.mirror_settings.push_mirror.none=未配置推送镜像 +settings.mirror_settings.push_mirror.remote_url=Git 远程存储库链接 +settings.mirror_settings.push_mirror.add=添加推送镜像 settings.sync_mirror=同步 settings.mirror_sync_in_progress=镜像同步正在进行中,请稍后后再试。 settings.email_notifications.enable=启用邮件通知 @@ -1611,6 +1632,7 @@ settings.transfer_form_title=输入仓库名称以做确认: settings.transfer_in_progress=当前正在进行转让。 如果你想将此代码库转让给另一个用户,请取消它。 settings.transfer_notices_1=-如果将其传输给单个用户, 您将失去对存储库的访问权限。 settings.transfer_notices_2=-如果将其转移到您 (共同) 拥有的组织,您可以继续访问该仓库。 +settings.transfer_notices_3=- 如果存储库是私有的并且被转移给某个用户,那么此操作可以确保该用户至少具有读权限(以及必要时的更改权限)。 settings.transfer_owner=新拥有者 settings.transfer_perform=执行转让 settings.transfer_started=该代码库已被标记为转让并等待来自 %s 的确认 diff --git a/options/locale/locale_zh-TW.ini b/options/locale/locale_zh-TW.ini index 81e7b7e0afdc..9cd7c237b076 100644 --- a/options/locale/locale_zh-TW.ini +++ b/options/locale/locale_zh-TW.ini @@ -676,7 +676,7 @@ email_notifications.disable=關閉郵件通知 email_notifications.submit=套用郵件偏好設定 [repo] -new_repo_helper=儲存庫包含所以專案檔案,包含修訂歷史。已經存放於別處了嗎?遷移儲存庫。 +new_repo_helper=儲存庫包含所有專案檔案,包含修訂歷史。已經存放於別處了嗎?遷移儲存庫。 owner=擁有者 owner_helper=組織可能因為儲存庫數量上限而未列入此選單。 repo_name=儲存庫名稱 diff --git a/routers/web/org/home.go b/routers/web/org/home.go index d84ae870ab6d..ad14f1845444 100644 --- a/routers/web/org/home.go +++ b/routers/web/org/home.go @@ -41,6 +41,7 @@ func Home(ctx *context.Context) { desc, err := markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: map[string]string{"mode": "document"}, + GitRepo: ctx.Repo.GitRepo, }, org.Description) if err != nil { ctx.ServerError("RenderString", err) diff --git a/routers/web/repo/issue.go b/routers/web/repo/issue.go index fd2877e7069d..a7951b6bce18 100644 --- a/routers/web/repo/issue.go +++ 
b/routers/web/repo/issue.go @@ -1137,6 +1137,7 @@ func ViewIssue(ctx *context.Context) { issue.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, issue.Content) if err != nil { ctx.ServerError("RenderString", err) @@ -1301,6 +1302,7 @@ func ViewIssue(ctx *context.Context) { comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, comment.Content) if err != nil { ctx.ServerError("RenderString", err) @@ -1376,6 +1378,7 @@ func ViewIssue(ctx *context.Context) { comment.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, comment.Content) if err != nil { ctx.ServerError("RenderString", err) @@ -1734,6 +1737,7 @@ func UpdateIssueContent(ctx *context.Context) { content, err := markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Query("context"), Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, issue.Content) if err != nil { ctx.ServerError("RenderString", err) @@ -2161,6 +2165,7 @@ func UpdateCommentContent(ctx *context.Context) { content, err := markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Query("context"), Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, comment.Content) if err != nil { ctx.ServerError("RenderString", err) diff --git a/routers/web/repo/milestone.go b/routers/web/repo/milestone.go index bb6b310cbe8d..4cdca38dd02b 100644 --- a/routers/web/repo/milestone.go +++ b/routers/web/repo/milestone.go @@ -88,6 +88,7 @@ func Milestones(ctx *context.Context) { m.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, 
m.Content) if err != nil { ctx.ServerError("RenderString", err) @@ -280,6 +281,7 @@ func MilestoneIssuesAndPulls(ctx *context.Context) { milestone.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, milestone.Content) if err != nil { ctx.ServerError("RenderString", err) diff --git a/routers/web/repo/projects.go b/routers/web/repo/projects.go index eb0719995cb5..c7490893d5fe 100644 --- a/routers/web/repo/projects.go +++ b/routers/web/repo/projects.go @@ -81,6 +81,7 @@ func Projects(ctx *context.Context) { projects[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, projects[i].Description) if err != nil { ctx.ServerError("RenderString", err) @@ -322,6 +323,7 @@ func ViewProject(ctx *context.Context) { project.RenderedContent, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, project.Description) if err != nil { ctx.ServerError("RenderString", err) diff --git a/routers/web/repo/release.go b/routers/web/repo/release.go index b7730e4ee25e..3b700e80160c 100644 --- a/routers/web/repo/release.go +++ b/routers/web/repo/release.go @@ -145,6 +145,7 @@ func releasesOrTags(ctx *context.Context, isTagList bool) { r.Note, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, r.Note) if err != nil { ctx.ServerError("RenderString", err) @@ -213,6 +214,7 @@ func SingleRelease(ctx *context.Context) { release.Note, err = markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: ctx.Repo.Repository.ComposeMetas(), + GitRepo: ctx.Repo.GitRepo, }, release.Note) if err != nil { ctx.ServerError("RenderString", err) diff 
--git a/routers/web/repo/view.go b/routers/web/repo/view.go index cd5b0f43edbc..74e2a2959772 100644 --- a/routers/web/repo/view.go +++ b/routers/web/repo/view.go @@ -338,6 +338,7 @@ func renderDirectory(ctx *context.Context, treeLink string) { Filename: readmeFile.name, URLPrefix: readmeTreelink, Metas: ctx.Repo.Repository.ComposeDocumentMetas(), + GitRepo: ctx.Repo.GitRepo, }, rd, &result) if err != nil { log.Error("Render failed: %v then fallback", err) @@ -512,6 +513,7 @@ func renderFile(ctx *context.Context, entry *git.TreeEntry, treeLink, rawLink st Filename: blob.Name(), URLPrefix: path.Dir(treeLink), Metas: ctx.Repo.Repository.ComposeDocumentMetas(), + GitRepo: ctx.Repo.GitRepo, }, rd, &result) if err != nil { ctx.ServerError("Render", err) @@ -570,6 +572,7 @@ func renderFile(ctx *context.Context, entry *git.TreeEntry, treeLink, rawLink st Filename: blob.Name(), URLPrefix: path.Dir(treeLink), Metas: ctx.Repo.Repository.ComposeDocumentMetas(), + GitRepo: ctx.Repo.GitRepo, }, rd, &result) if err != nil { ctx.ServerError("Render", err) diff --git a/routers/web/user/profile.go b/routers/web/user/profile.go index e66820e1317b..72d00666453e 100644 --- a/routers/web/user/profile.go +++ b/routers/web/user/profile.go @@ -117,6 +117,7 @@ func Profile(ctx *context.Context) { content, err := markdown.RenderString(&markup.RenderContext{ URLPrefix: ctx.Repo.RepoLink, Metas: map[string]string{"mode": "document"}, + GitRepo: ctx.Repo.GitRepo, }, ctxUser.Description) if err != nil { ctx.ServerError("RenderString", err) diff --git a/services/pull/pull.go b/services/pull/pull.go index cc560fb199d2..02c0a7fe7c87 100644 --- a/services/pull/pull.go +++ b/services/pull/pull.go @@ -570,16 +570,44 @@ func GetSquashMergeCommitMessages(pr *models.PullRequest) string { authors := make([]string, 0, list.Len()) stringBuilder := strings.Builder{} - stringBuilder.WriteString(pr.Issue.Content) - if stringBuilder.Len() > 0 { - stringBuilder.WriteRune('\n') - stringBuilder.WriteRune('\n') + 
if !setting.Repository.PullRequest.PopulateSquashCommentWithCommitMessages { + stringBuilder.WriteString(pr.Issue.Content) + if stringBuilder.Len() > 0 { + stringBuilder.WriteRune('\n') + stringBuilder.WriteRune('\n') + } } // commits list is in reverse chronological order element := list.Back() for element != nil { commit := element.Value.(*git.Commit) + + if setting.Repository.PullRequest.PopulateSquashCommentWithCommitMessages { + maxSize := setting.Repository.PullRequest.DefaultMergeMessageSize + if maxSize < 0 || stringBuilder.Len() < maxSize { + var toWrite []byte + if element == list.Back() { + toWrite = []byte(strings.TrimPrefix(commit.CommitMessage, pr.Issue.Title)) + } else { + toWrite = []byte(commit.CommitMessage) + } + + if len(toWrite) > maxSize-stringBuilder.Len() && maxSize > -1 { + toWrite = append(toWrite[:maxSize-stringBuilder.Len()], "..."...) + } + if _, err := stringBuilder.Write(toWrite); err != nil { + log.Error("Unable to write commit message Error: %v", err) + return "" + } + + if _, err := stringBuilder.WriteRune('\n'); err != nil { + log.Error("Unable to write commit message Error: %v", err) + return "" + } + } + } + authorString := commit.Author.String() if !authorsMap[authorString] && authorString != posterSig { authors = append(authors, authorString) diff --git a/templates/user/dashboard/repolist.tmpl b/templates/user/dashboard/repolist.tmpl index 8ac07e1df63e..f39d3711d473 100644 --- a/templates/user/dashboard/repolist.tmpl +++ b/templates/user/dashboard/repolist.tmpl @@ -9,7 +9,7 @@ :more-repos-link="'{{.ContextUser.HomeLink}}'" {{if not .ContextUser.IsOrganization}} :organizations="[ - {{range .ContextUser.Orgs}} + {{range .Orgs}} {name: '{{.Name}}', num_repos: '{{.NumRepos}}'}, {{end}} ]" diff --git a/vendor/github.com/djherbis/buffer/.travis.yml b/vendor/github.com/djherbis/buffer/.travis.yml new file mode 100644 index 000000000000..7d03fb1ffb66 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/.travis.yml @@ -0,0 +1,20 @@ 
+language: go +go: +- tip +before_install: + - go get golang.org/x/lint/golint + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: + - '[ "${TRAVIS_PULL_REQUEST}" != "false" ] || $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN' + - $HOME/gopath/bin/golint ./... + - go vet + - go test -v ./... +notifications: + email: + on_success: never + on_failure: change +env: + global: + secure: X2uEipzLOL7IDFQgiJdKQvA7gWw746gmU4HoLr73Au+mDZnIaYfpM7pR0r9S9DY23obmflOBFytB9IIyr6Ganhs8KDd6osBS3JSu5ydZKhoHDshSZHxW6GdCiR0Ya85JZ2k/CzwuZ95FcCTztXG59D8VhAoM+8gNW6VLK2mL60Y= diff --git a/vendor/github.com/djherbis/buffer/LICENSE.txt b/vendor/github.com/djherbis/buffer/LICENSE.txt new file mode 100644 index 000000000000..f5daa194f780 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/LICENSE.txt @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dustin H + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/djherbis/buffer/README.md b/vendor/github.com/djherbis/buffer/README.md new file mode 100644 index 000000000000..b953f8b9fe01 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/README.md @@ -0,0 +1,174 @@ +Buffer +========== + +[![GoDoc](https://godoc.org/github.com/djherbis/buffer?status.svg)](https://godoc.org/github.com/djherbis/buffer) +[![Release](https://img.shields.io/github/release/djherbis/buffer.svg)](https://github.com/djherbis/buffer/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt) +[![Build Status](https://travis-ci.org/djherbis/buffer.svg?branch=master)](https://travis-ci.org/djherbis/buffer) +[![Coverage Status](https://coveralls.io/repos/djherbis/buffer/badge.svg?branch=master)](https://coveralls.io/r/djherbis/buffer?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/buffer)](https://goreportcard.com/report/github.com/djherbis/buffer) + +Usage +------------ + +The following buffers provide simple unique behaviours which when composed can create complex buffering strategies. For use with github.com/djherbis/nio for Buffered io.Pipe and io.Copy implementations. + +For example: + +```go +import ( + "github.com/djherbis/buffer" + "github.com/djherbis/nio" + + "io/ioutil" +) + +// Buffer 32KB to Memory, after that buffer to 100MB chunked files +buf := buffer.NewUnboundedBuffer(32*1024, 100*1024*1024) +nio.Copy(w, r, buf) // Reads from r, writes to buf, reads from buf writes to w (concurrently). 
+ +// Buffer 32KB to Memory, discard overflow +buf = buffer.NewSpill(32*1024, ioutil.Discard) +nio.Copy(w, r, buf) +``` + +Supported Buffers +------------ + +#### Bounded Buffers #### + +Memory: Wrapper for bytes.Buffer + +File: File-based buffering. The file never exceeds Cap() in length, no matter how many times its written/read from. It accomplishes this by "wrapping" around the fixed max-length file when the data gets too long but there is available freed space at the beginning of the file. The caller is responsible for closing and deleting the file when done. + +```go +import ( + "ioutil" + "os" + + "github.com/djherbis/buffer" +) + +// Create a File-based Buffer with max size 100MB +file, err := ioutil.TempFile("", "buffer") +if err != nil { + return err +} +defer os.Remove(file.Name()) +defer file.Close() + +buf := buffer.NewFile(100*1024*1024, file) + +// A simpler way: +pool := buffer.NewFilePool(100*1024*1024, "") // "" -- use temp dir +buf, err := pool.Get() // allocate the buffer +if err != nil { + return err +} +defer pool.Put(buf) // close and remove the allocated file for the buffer + +``` + +Multi: A fixed length linked-list of buffers. Each buffer reads from the next buffer so that all the buffered data is shifted upwards in the list when reading. Writes are always written to the first buffer in the list whose Len() < Cap(). + +```go +import ( + "github.com/djherbis/buffer" +) + +mem := buffer.New(32*1024) +file := buffer.NewFile(100*1024*1024, someFileObj)) // you'll need to manage Open(), Close() and Delete someFileObj + +// Buffer composed of 32KB of memory, and 100MB of file. +buf := buffer.NewMulti(mem, file) +``` + +#### Unbounded Buffers #### + +Partition: A queue of buffers. Writes always go to the last buffer in the queue. If all buffers are full, a new buffer is "pushed" to the end of the queue (generated by a user-given function). Reads come from the first buffer, when the first buffer is emptied it is "popped" off the queue. 
+ +```go +import ( + "github.com/djherbis/buffer" +) + +// Create 32 KB sized-chunks of memory as needed to expand/contract the buffer size. +buf := buffer.NewPartition(buffer.NewMemPool(32*1024)) + +// Create 100 MB sized-chunks of files as needed to expand/contract the buffer size. +buf = buffer.NewPartition(buffer.NewFilePool(100*1024*1024, "")) +``` + +Ring: A single buffer which begins overwriting the oldest buffered data when it reaches its capacity. + +```go +import ( + "github.com/djherbis/buffer" +) + +// Create a File-based Buffer with max size 100MB +file := buffer.NewFile(100*1024*1024, someFileObj) // you'll need to Open(), Close() and Delete someFileObj. + +// If buffered data exceeds 100MB, overwrite oldest data as new data comes in +buf := buffer.NewRing(file) // requires BufferAt interface. +``` + +Spill: A single buffer which when full, writes the overflow to a given io.Writer. +-> Note that it will actually "spill" whenever there is an error while writing, this should only be a "full" error. + +```go +import ( + "github.com/djherbis/buffer" + "github.com/djherbis/nio" + + "io/ioutil" +) + +// Buffer 32KB to Memory, discard overflow +buf := buffer.NewSpill(32*1024, ioutil.Discard) +nio.Copy(w, r, buf) +``` + +#### Empty Buffer #### + +Discard: Reads always return EOF, writes goto ioutil.Discard. + +```go +import ( + "github.com/djherbis/buffer" +) + +// Reads will return io.EOF, writes will return success (nil error, full write) but no data was written. +buf := buffer.Discard +``` + +Custom Buffers +------------ + +Feel free to implement your own buffer, just meet the required interface (Buffer/BufferAt) and compose away! 
+ +```go + +// Buffer Interface used by Multi and Partition +type Buffer interface { + Len() int64 + Cap() int64 + io.Reader + io.Writer + Reset() +} + +// BufferAt interface used by Ring +type BufferAt interface { + Buffer + io.ReaderAt + io.WriterAt +} + +``` + +Installation +------------ +```sh +go get github.com/djherbis/buffer +``` diff --git a/vendor/github.com/djherbis/buffer/buffer.go b/vendor/github.com/djherbis/buffer/buffer.go new file mode 100644 index 000000000000..34b0811dacf4 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/buffer.go @@ -0,0 +1,48 @@ +// Package buffer implements a series of Buffers which can be composed to implement complicated buffering strategies +package buffer + +import ( + "io" + "os" +) + +// Buffer is used to Write() data which will be Read() later. +type Buffer interface { + Len() int64 // How much data is Buffered in bytes + Cap() int64 // How much data can be Buffered at once in bytes. + io.Reader // Read() will read from the top of the buffer [io.EOF if empty] + io.Writer // Write() will write to the end of the buffer [io.ErrShortWrite if not enough space] + Reset() // Truncates the buffer, Len() == 0. +} + +// BufferAt is a buffer which supports io.ReaderAt and io.WriterAt +type BufferAt interface { + Buffer + io.ReaderAt + io.WriterAt +} + +func len64(p []byte) int64 { + return int64(len(p)) +} + +// Gap returns buf.Cap() - buf.Len() +func Gap(buf Buffer) int64 { + return buf.Cap() - buf.Len() +} + +// Full returns true iff buf.Len() == buf.Cap() +func Full(buf Buffer) bool { + return buf.Len() == buf.Cap() +} + +// Empty returns false iff buf.Len() == 0 +func Empty(buf Buffer) bool { + return buf.Len() == 0 +} + +// NewUnboundedBuffer returns a Buffer which buffers "mem" bytes to memory +// and then creates file's of size "file" to buffer above "mem" bytes. 
+func NewUnboundedBuffer(mem, file int64) Buffer { + return NewMulti(New(mem), NewPartition(NewFilePool(file, os.TempDir()))) +} diff --git a/vendor/github.com/djherbis/buffer/discard.go b/vendor/github.com/djherbis/buffer/discard.go new file mode 100644 index 000000000000..ecf44987bab5 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/discard.go @@ -0,0 +1,36 @@ +package buffer + +import ( + "encoding/gob" + "io" + "io/ioutil" + "math" +) + +type discard struct{} + +// Discard is a Buffer which writes to ioutil.Discard and read's return 0, io.EOF. +// All of its methods are concurrent safe. +var Discard Buffer = discard{} + +func (buf discard) Len() int64 { + return 0 +} + +func (buf discard) Cap() int64 { + return math.MaxInt64 +} + +func (buf discard) Reset() {} + +func (buf discard) Read(p []byte) (n int, err error) { + return 0, io.EOF +} + +func (buf discard) Write(p []byte) (int, error) { + return ioutil.Discard.Write(p) +} + +func init() { + gob.Register(&discard{}) +} diff --git a/vendor/github.com/djherbis/buffer/file.go b/vendor/github.com/djherbis/buffer/file.go new file mode 100644 index 000000000000..6eb77aabe6ec --- /dev/null +++ b/vendor/github.com/djherbis/buffer/file.go @@ -0,0 +1,72 @@ +package buffer + +import ( + "bytes" + "encoding/gob" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/djherbis/buffer/wrapio" +) + +// File is used as the backing resource for a the NewFile BufferAt. +type File interface { + Name() string + Stat() (fi os.FileInfo, err error) + io.ReaderAt + io.WriterAt + Close() error +} + +type fileBuffer struct { + file File + *wrapio.Wrapper +} + +// NewFile returns a new BufferAt backed by "file" with max-size N. 
+func NewFile(N int64, file File) BufferAt { + return &fileBuffer{ + file: file, + Wrapper: wrapio.NewWrapper(file, 0, 0, N), + } +} + +func init() { + gob.Register(&fileBuffer{}) +} + +func (buf *fileBuffer) MarshalBinary() ([]byte, error) { + fullpath, err := filepath.Abs(filepath.Dir(buf.file.Name())) + if err != nil { + return nil, err + } + base := filepath.Base(buf.file.Name()) + buf.file.Close() + + buffer := bytes.NewBuffer(nil) + fmt.Fprintln(buffer, filepath.Join(fullpath, base)) + fmt.Fprintln(buffer, buf.Wrapper.N, buf.Wrapper.L, buf.Wrapper.O) + return buffer.Bytes(), nil +} + +func (buf *fileBuffer) UnmarshalBinary(data []byte) error { + buffer := bytes.NewBuffer(data) + var filename string + var N, L, O int64 + _, err := fmt.Fscanln(buffer, &filename) + if err != nil { + return err + } + + file, err := os.Open(filename) + if err != nil { + return err + } + buf.file = file + + _, err = fmt.Fscanln(buffer, &N, &L, &O) + buf.Wrapper = wrapio.NewWrapper(file, L, O, N) + return err +} diff --git a/vendor/github.com/djherbis/buffer/go.mod b/vendor/github.com/djherbis/buffer/go.mod new file mode 100644 index 000000000000..e0ef91a6a5ae --- /dev/null +++ b/vendor/github.com/djherbis/buffer/go.mod @@ -0,0 +1,3 @@ +module github.com/djherbis/buffer + +go 1.13 diff --git a/vendor/github.com/djherbis/buffer/limio/limit.go b/vendor/github.com/djherbis/buffer/limio/limit.go new file mode 100644 index 000000000000..deb50fffc9c6 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/limio/limit.go @@ -0,0 +1,31 @@ +package limio + +import "io" + +type limitedWriter struct { + W io.Writer + N int64 +} + +func (l *limitedWriter) Write(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, io.ErrShortWrite + } + if int64(len(p)) > l.N { + p = p[0:l.N] + err = io.ErrShortWrite + } + n, er := l.W.Write(p) + if er != nil { + err = er + } + l.N -= int64(n) + return n, err +} + +// LimitWriter works like io.LimitReader. 
It writes at most n bytes +// to the underlying Writer. It returns io.ErrShortWrite if more than n +// bytes are attempted to be written. +func LimitWriter(w io.Writer, n int64) io.Writer { + return &limitedWriter{W: w, N: n} +} diff --git a/vendor/github.com/djherbis/buffer/list.go b/vendor/github.com/djherbis/buffer/list.go new file mode 100644 index 000000000000..11ec6961c7af --- /dev/null +++ b/vendor/github.com/djherbis/buffer/list.go @@ -0,0 +1,47 @@ +package buffer + +import "math" + +// List is a slice of Buffers, it's the backing for NewPartition +type List []Buffer + +// Len is the sum of the Len()'s of the Buffers in the List. +func (l *List) Len() (n int64) { + for _, buffer := range *l { + if n > math.MaxInt64-buffer.Len() { + return math.MaxInt64 + } + n += buffer.Len() + } + return n +} + +// Cap is the sum of the Cap()'s of the Buffers in the List. +func (l *List) Cap() (n int64) { + for _, buffer := range *l { + if n > math.MaxInt64-buffer.Cap() { + return math.MaxInt64 + } + n += buffer.Cap() + } + return n +} + +// Reset calls Reset() on each of the Buffers in the list. +func (l *List) Reset() { + for _, buffer := range *l { + buffer.Reset() + } +} + +// Push adds a Buffer to the end of the List +func (l *List) Push(b Buffer) { + *l = append(*l, b) +} + +// Pop removes and returns a Buffer from the front of the List +func (l *List) Pop() (b Buffer) { + b = (*l)[0] + *l = (*l)[1:] + return b +} diff --git a/vendor/github.com/djherbis/buffer/list_at.go b/vendor/github.com/djherbis/buffer/list_at.go new file mode 100644 index 000000000000..6893df8856a0 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/list_at.go @@ -0,0 +1,47 @@ +package buffer + +import "math" + +// ListAt is a slice of BufferAt's, it's the backing for NewPartitionAt +type ListAt []BufferAt + +// Len is the sum of the Len()'s of the BufferAt's in the list. 
+func (l *ListAt) Len() (n int64) { + for _, buffer := range *l { + if n > math.MaxInt64-buffer.Len() { + return math.MaxInt64 + } + n += buffer.Len() + } + return n +} + +// Cap is the sum of the Cap()'s of the BufferAt's in the list. +func (l *ListAt) Cap() (n int64) { + for _, buffer := range *l { + if n > math.MaxInt64-buffer.Cap() { + return math.MaxInt64 + } + n += buffer.Cap() + } + return n +} + +// Reset calls Reset() on each of the BufferAt's in the list. +func (l *ListAt) Reset() { + for _, buffer := range *l { + buffer.Reset() + } +} + +// Push adds a BufferAt to the end of the list +func (l *ListAt) Push(b BufferAt) { + *l = append(*l, b) +} + +// Pop removes and returns a BufferAt from the front of the list +func (l *ListAt) Pop() (b BufferAt) { + b = (*l)[0] + *l = (*l)[1:] + return b +} diff --git a/vendor/github.com/djherbis/buffer/mem.go b/vendor/github.com/djherbis/buffer/mem.go new file mode 100644 index 000000000000..8c7ef36461b5 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/mem.go @@ -0,0 +1,82 @@ +package buffer + +import ( + "bytes" + "encoding/gob" + "fmt" + "io" + + "github.com/djherbis/buffer/limio" +) + +type memory struct { + N int64 + *bytes.Buffer +} + +// New returns a new in memory BufferAt with max size N. +// It's backed by a bytes.Buffer. 
+func New(n int64) BufferAt { + return &memory{ + N: n, + Buffer: bytes.NewBuffer(nil), + } +} + +func (buf *memory) Cap() int64 { + return buf.N +} + +func (buf *memory) Len() int64 { + return int64(buf.Buffer.Len()) +} + +func (buf *memory) Write(p []byte) (n int, err error) { + return limio.LimitWriter(buf.Buffer, Gap(buf)).Write(p) +} + +func (buf *memory) WriteAt(p []byte, off int64) (n int, err error) { + if off > buf.Len() { + return 0, io.ErrShortWrite + } else if len64(p)+off <= buf.Len() { + d := buf.Bytes()[off:] + return copy(d, p), nil + } else { + d := buf.Bytes()[off:] + n = copy(d, p) + m, err := buf.Write(p[n:]) + return n + m, err + } +} + +func (buf *memory) ReadAt(p []byte, off int64) (n int, err error) { + return bytes.NewReader(buf.Bytes()).ReadAt(p, off) +} + +func (buf *memory) Read(p []byte) (n int, err error) { + return io.LimitReader(buf.Buffer, buf.Len()).Read(p) +} + +func (buf *memory) ReadFrom(r io.Reader) (n int64, err error) { + return buf.Buffer.ReadFrom(io.LimitReader(r, Gap(buf))) +} + +func init() { + gob.Register(&memory{}) +} + +func (buf *memory) MarshalBinary() ([]byte, error) { + var b bytes.Buffer + fmt.Fprintln(&b, buf.N) + b.Write(buf.Bytes()) + return b.Bytes(), nil +} + +func (buf *memory) UnmarshalBinary(bindata []byte) error { + data := make([]byte, len(bindata)) + copy(data, bindata) + b := bytes.NewBuffer(data) + _, err := fmt.Fscanln(b, &buf.N) + buf.Buffer = bytes.NewBuffer(b.Bytes()) + return err +} diff --git a/vendor/github.com/djherbis/buffer/multi.go b/vendor/github.com/djherbis/buffer/multi.go new file mode 100644 index 000000000000..a752483ee43b --- /dev/null +++ b/vendor/github.com/djherbis/buffer/multi.go @@ -0,0 +1,185 @@ +package buffer + +import ( + "bytes" + "encoding/gob" + "io" + "math" +) + +type chain struct { + Buf BufferAt + Next BufferAt +} + +type nopBufferAt struct { + Buffer +} + +func (buf *nopBufferAt) ReadAt(p []byte, off int64) (int, error) { + panic("ReadAt not implemented") +} + +func 
(buf *nopBufferAt) WriteAt(p []byte, off int64) (int, error) { + panic("WriteAt not implemented") +} + +// toBufferAt converts a Buffer to a BufferAt with nop ReadAt and WriteAt funcs +func toBufferAt(buf Buffer) BufferAt { + return &nopBufferAt{Buffer: buf} +} + +// NewMultiAt returns a BufferAt which is the logical concatenation of the passed BufferAts. +// The data in the buffers is shifted such that there is no non-empty buffer following +// a non-full buffer, this process is also run after every Read. +// If no buffers are passed, the returned Buffer is nil. +func NewMultiAt(buffers ...BufferAt) BufferAt { + if len(buffers) == 0 { + return nil + } else if len(buffers) == 1 { + return buffers[0] + } + + buf := &chain{ + Buf: buffers[0], + Next: NewMultiAt(buffers[1:]...), + } + + buf.Defrag() + + return buf +} + +// NewMulti returns a Buffer which is the logical concatenation of the passed buffers. +// The data in the buffers is shifted such that there is no non-empty buffer following +// a non-full buffer, this process is also run after every Read. +// If no buffers are passed, the returned Buffer is nil. +func NewMulti(buffers ...Buffer) Buffer { + bufAt := make([]BufferAt, len(buffers)) + for i, buf := range buffers { + bufAt[i] = toBufferAt(buf) + } + return NewMultiAt(bufAt...) 
+} + +func (buf *chain) Reset() { + buf.Next.Reset() + buf.Buf.Reset() +} + +func (buf *chain) Cap() (n int64) { + Next := buf.Next.Cap() + if buf.Buf.Cap() > math.MaxInt64-Next { + return math.MaxInt64 + } + return buf.Buf.Cap() + Next +} + +func (buf *chain) Len() (n int64) { + Next := buf.Next.Len() + if buf.Buf.Len() > math.MaxInt64-Next { + return math.MaxInt64 + } + return buf.Buf.Len() + Next +} + +func (buf *chain) Defrag() { + for !Full(buf.Buf) && !Empty(buf.Next) { + r := io.LimitReader(buf.Next, Gap(buf.Buf)) + if _, err := io.Copy(buf.Buf, r); err != nil && err != io.EOF { + return + } + } +} + +func (buf *chain) Read(p []byte) (n int, err error) { + n, err = buf.Buf.Read(p) + if len(p[n:]) > 0 && (err == nil || err == io.EOF) { + m, err := buf.Next.Read(p[n:]) + n += m + if err != nil { + return n, err + } + } + + buf.Defrag() + + return n, err +} + +func (buf *chain) ReadAt(p []byte, off int64) (n int, err error) { + if buf.Buf.Len() < off { + return buf.Next.ReadAt(p, off-buf.Buf.Len()) + } + + n, err = buf.Buf.ReadAt(p, off) + if len(p[n:]) > 0 && (err == nil || err == io.EOF) { + var m int + m, err = buf.Next.ReadAt(p[n:], 0) + n += m + } + return n, err +} + +func (buf *chain) Write(p []byte) (n int, err error) { + if n, err = buf.Buf.Write(p); err == io.ErrShortWrite { + err = nil + } + p = p[n:] + if len(p) > 0 && err == nil { + m, err := buf.Next.Write(p) + n += m + if err != nil { + return n, err + } + } + return n, err +} + +func (buf *chain) WriteAt(p []byte, off int64) (n int, err error) { + switch { + case buf.Buf.Cap() <= off: // past the end + return buf.Next.WriteAt(p, off-buf.Buf.Cap()) + + case buf.Buf.Cap() >= off+int64(len(p)): // fits in + return buf.Buf.WriteAt(p, off) + + default: // partial fit + n, err = buf.Buf.WriteAt(p, off) + if len(p[n:]) > 0 && (err == nil || err == io.ErrShortWrite) { + var m int + m, err = buf.Next.WriteAt(p[n:], 0) + n += m + } + return n, err + } +} + +func init() { + gob.Register(&chain{}) + 
gob.Register(&nopBufferAt{}) +} + +func (buf *chain) MarshalBinary() ([]byte, error) { + b := bytes.NewBuffer(nil) + enc := gob.NewEncoder(b) + if err := enc.Encode(&buf.Buf); err != nil { + return nil, err + } + if err := enc.Encode(&buf.Next); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func (buf *chain) UnmarshalBinary(data []byte) error { + b := bytes.NewBuffer(data) + dec := gob.NewDecoder(b) + if err := dec.Decode(&buf.Buf); err != nil { + return err + } + if err := dec.Decode(&buf.Next); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/djherbis/buffer/partition.go b/vendor/github.com/djherbis/buffer/partition.go new file mode 100644 index 000000000000..1726d20f8440 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/partition.go @@ -0,0 +1,101 @@ +package buffer + +import ( + "encoding/gob" + "io" + "math" +) + +type partition struct { + List + Pool +} + +// NewPartition returns a Buffer which uses a Pool to extend or shrink its size as needed. +// It automatically allocates new buffers with pool.Get() to extend is length, and +// pool.Put() to release unused buffers as it shrinks. 
+func NewPartition(pool Pool, buffers ...Buffer) Buffer { + return &partition{ + Pool: pool, + List: buffers, + } +} + +func (buf *partition) Cap() int64 { + return math.MaxInt64 +} + +func (buf *partition) Read(p []byte) (n int, err error) { + for len(p) > 0 { + + if len(buf.List) == 0 { + return n, io.EOF + } + + buffer := buf.List[0] + + if Empty(buffer) { + buf.Pool.Put(buf.Pop()) + continue + } + + m, er := buffer.Read(p) + n += m + p = p[m:] + + if er != nil && er != io.EOF { + return n, er + } + + } + return n, nil +} + +func (buf *partition) grow() error { + next, err := buf.Pool.Get() + if err != nil { + return err + } + buf.Push(next) + return nil +} + +func (buf *partition) Write(p []byte) (n int, err error) { + for len(p) > 0 { + + if len(buf.List) == 0 { + if err := buf.grow(); err != nil { + return n, err + } + } + + buffer := buf.List[len(buf.List)-1] + + if Full(buffer) { + if err := buf.grow(); err != nil { + return n, err + } + continue + } + + m, er := buffer.Write(p) + n += m + p = p[m:] + + if er != nil && er != io.ErrShortWrite { + return n, er + } + + } + return n, nil +} + +func (buf *partition) Reset() { + for len(buf.List) > 0 { + buf.Pool.Put(buf.Pop()) + } +} + +func init() { + gob.Register(&partition{}) +} diff --git a/vendor/github.com/djherbis/buffer/partition_at.go b/vendor/github.com/djherbis/buffer/partition_at.go new file mode 100644 index 000000000000..56b44e8d27de --- /dev/null +++ b/vendor/github.com/djherbis/buffer/partition_at.go @@ -0,0 +1,187 @@ +package buffer + +import ( + "encoding/gob" + "errors" + "io" + "math" +) + +type partitionAt struct { + ListAt + PoolAt +} + +// NewPartitionAt returns a BufferAt which uses a PoolAt to extend or shrink its size as needed. +// It automatically allocates new buffers with pool.Get() to extend is length, and +// pool.Put() to release unused buffers as it shrinks. 
+func NewPartitionAt(pool PoolAt, buffers ...BufferAt) BufferAt { + return &partitionAt{ + PoolAt: pool, + ListAt: buffers, + } +} + +func (buf *partitionAt) Cap() int64 { + return math.MaxInt64 +} + +func (buf *partitionAt) Read(p []byte) (n int, err error) { + for len(p) > 0 { + + if len(buf.ListAt) == 0 { + return n, io.EOF + } + + buffer := buf.ListAt[0] + + if Empty(buffer) { + buf.PoolAt.Put(buf.Pop()) + continue + } + + m, er := buffer.Read(p) + n += m + p = p[m:] + + if er != nil && er != io.EOF { + return n, er + } + + } + return n, nil +} + +func (buf *partitionAt) ReadAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errors.New("buffer.PartionAt.ReadAt: negative offset") + } + for _, buffer := range buf.ListAt { + // Find the buffer where this offset is found. + if buffer.Len() <= off { + off -= buffer.Len() + continue + } + + m, er := buffer.ReadAt(p, off) + n += m + p = p[m:] + + if er != nil && er != io.EOF { + return n, er + } + if len(p) == 0 { + return n, er + } + // We need to read more, starting from 0 in the next buffer. 
+ off = 0 + } + if len(p) > 0 { + return n, io.EOF + } + return n, nil +} + +func (buf *partitionAt) grow() error { + next, err := buf.PoolAt.Get() + if err != nil { + return err + } + buf.Push(next) + return nil +} + +func (buf *partitionAt) Write(p []byte) (n int, err error) { + for len(p) > 0 { + + if len(buf.ListAt) == 0 { + if err := buf.grow(); err != nil { + return n, err + } + } + + buffer := buf.ListAt[len(buf.ListAt)-1] + + if Full(buffer) { + if err := buf.grow(); err != nil { + return n, err + } + continue + } + + m, er := buffer.Write(p) + n += m + p = p[m:] + + if er != nil && er != io.ErrShortWrite { + return n, er + } + + } + return n, nil +} + +func (buf *partitionAt) WriteAt(p []byte, off int64) (n int, err error) { + if off < 0 { + return 0, errors.New("buffer.PartionAt.WriteAt: negative offset") + } + if off == buf.Len() { // writing at the end special case + if err := buf.grow(); err != nil { + return 0, err + } + } + fitCheck := BufferAt.Len + for i := 0; i < len(buf.ListAt); i++ { + buffer := buf.ListAt[i] + + // Find the buffer where this offset is found. + if fitCheck(buffer) < off { + off -= fitCheck(buffer) + continue + } + + if i+1 == len(buf.ListAt) { + fitCheck = BufferAt.Cap + } + + endOff := off + int64(len(p)) + if fitCheck(buffer) >= endOff { + // Everything should fit. + return buffer.WriteAt(p, off) + } + + // Assume it won't all fit, only write what should fit. + canFit := int(fitCheck(buffer) - off) + if len(p[:canFit]) > 0 { + var m int + m, err = buffer.WriteAt(p[:canFit], off) + n += m + p = p[m:] + } + off = 0 // All writes are at offset 0 of following buffers now. 
+ + if err != nil || len(p) == 0 { + return n, err + } + if i+1 == len(buf.ListAt) { + if err := buf.grow(); err != nil { + return 0, err + } + fitCheck = BufferAt.Cap + } + } + if len(p) > 0 { + err = io.ErrShortWrite + } + return n, err +} + +func (buf *partitionAt) Reset() { + for len(buf.ListAt) > 0 { + buf.PoolAt.Put(buf.Pop()) + } +} + +func init() { + gob.Register(&partitionAt{}) +} diff --git a/vendor/github.com/djherbis/buffer/pool.go b/vendor/github.com/djherbis/buffer/pool.go new file mode 100644 index 000000000000..0e57de8406bf --- /dev/null +++ b/vendor/github.com/djherbis/buffer/pool.go @@ -0,0 +1,111 @@ +package buffer + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "io/ioutil" + "os" + "sync" +) + +// Pool provides a way to Allocate and Release Buffer objects +// Pools mut be concurrent-safe for calls to Get() and Put(). +type Pool interface { + Get() (Buffer, error) // Allocate a Buffer + Put(buf Buffer) error // Release or Reuse a Buffer +} + +type pool struct { + pool sync.Pool +} + +// NewPool returns a Pool(), it's backed by a sync.Pool so its safe for concurrent use. +// Get() and Put() errors will always be nil. +// It will not work with gob. +func NewPool(New func() Buffer) Pool { + return &pool{ + pool: sync.Pool{ + New: func() interface{} { + return New() + }, + }, + } +} + +func (p *pool) Get() (Buffer, error) { + return p.pool.Get().(Buffer), nil +} + +func (p *pool) Put(buf Buffer) error { + buf.Reset() + p.pool.Put(buf) + return nil +} + +type memPool struct { + N int64 + Pool +} + +// NewMemPool returns a Pool, Get() returns an in memory buffer of max size N. +// Put() returns the buffer to the pool after resetting it. +// Get() and Put() errors will always be nil. 
+func NewMemPool(N int64) Pool { + return &memPool{ + N: N, + Pool: NewPool(func() Buffer { + return New(N) + }), + } +} + +func (m *memPool) MarshalBinary() ([]byte, error) { + buf := bytes.NewBuffer(nil) + err := binary.Write(buf, binary.LittleEndian, m.N) + return buf.Bytes(), err +} + +func (m *memPool) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + err := binary.Read(buf, binary.LittleEndian, &m.N) + m.Pool = NewPool(func() Buffer { + return New(m.N) + }) + return err +} + +type filePool struct { + N int64 + Directory string +} + +// NewFilePool returns a Pool, Get() returns a file-based buffer of max size N. +// Put() closes and deletes the underlying file for the buffer. +// Get() may return an error if it fails to create a file for the buffer. +// Put() may return an error if it fails to delete the file. +func NewFilePool(N int64, dir string) Pool { + return &filePool{N: N, Directory: dir} +} + +func (p *filePool) Get() (Buffer, error) { + file, err := ioutil.TempFile(p.Directory, "buffer") + if err != nil { + return nil, err + } + return NewFile(p.N, file), nil +} + +func (p *filePool) Put(buf Buffer) (err error) { + buf.Reset() + if fileBuf, ok := buf.(*fileBuffer); ok { + fileBuf.file.Close() + err = os.Remove(fileBuf.file.Name()) + } + return err +} + +func init() { + gob.Register(&memPool{}) + gob.Register(&filePool{}) +} diff --git a/vendor/github.com/djherbis/buffer/pool_at.go b/vendor/github.com/djherbis/buffer/pool_at.go new file mode 100644 index 000000000000..23635ad128fc --- /dev/null +++ b/vendor/github.com/djherbis/buffer/pool_at.go @@ -0,0 +1,111 @@ +package buffer + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "io/ioutil" + "os" + "sync" +) + +// PoolAt provides a way to Allocate and Release BufferAt objects +// PoolAt's mut be concurrent-safe for calls to Get() and Put(). 
+type PoolAt interface { + Get() (BufferAt, error) // Allocate a BufferAt + Put(buf BufferAt) error // Release or Reuse a BufferAt +} + +type poolAt struct { + poolAt sync.Pool +} + +// NewPoolAt returns a PoolAt(), it's backed by a sync.Pool so its safe for concurrent use. +// Get() and Put() errors will always be nil. +// It will not work with gob. +func NewPoolAt(New func() BufferAt) PoolAt { + return &poolAt{ + poolAt: sync.Pool{ + New: func() interface{} { + return New() + }, + }, + } +} + +func (p *poolAt) Get() (BufferAt, error) { + return p.poolAt.Get().(BufferAt), nil +} + +func (p *poolAt) Put(buf BufferAt) error { + buf.Reset() + p.poolAt.Put(buf) + return nil +} + +type memPoolAt struct { + N int64 + PoolAt +} + +// NewMemPoolAt returns a PoolAt, Get() returns an in memory buffer of max size N. +// Put() returns the buffer to the pool after resetting it. +// Get() and Put() errors will always be nil. +func NewMemPoolAt(N int64) PoolAt { + return &memPoolAt{ + N: N, + PoolAt: NewPoolAt(func() BufferAt { + return New(N) + }), + } +} + +func (m *memPoolAt) MarshalBinary() ([]byte, error) { + buf := bytes.NewBuffer(nil) + err := binary.Write(buf, binary.LittleEndian, m.N) + return buf.Bytes(), err +} + +func (m *memPoolAt) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + err := binary.Read(buf, binary.LittleEndian, &m.N) + m.PoolAt = NewPoolAt(func() BufferAt { + return New(m.N) + }) + return err +} + +type filePoolAt struct { + N int64 + Directory string +} + +// NewFilePoolAt returns a PoolAt, Get() returns a file-based buffer of max size N. +// Put() closes and deletes the underlying file for the buffer. +// Get() may return an error if it fails to create a file for the buffer. +// Put() may return an error if it fails to delete the file. 
+func NewFilePoolAt(N int64, dir string) PoolAt { + return &filePoolAt{N: N, Directory: dir} +} + +func (p *filePoolAt) Get() (BufferAt, error) { + file, err := ioutil.TempFile(p.Directory, "buffer") + if err != nil { + return nil, err + } + return NewFile(p.N, file), nil +} + +func (p *filePoolAt) Put(buf BufferAt) (err error) { + buf.Reset() + if fileBuf, ok := buf.(*fileBuffer); ok { + fileBuf.file.Close() + err = os.Remove(fileBuf.file.Name()) + } + return err +} + +func init() { + gob.Register(&memPoolAt{}) + gob.Register(&filePoolAt{}) +} diff --git a/vendor/github.com/djherbis/buffer/ring.go b/vendor/github.com/djherbis/buffer/ring.go new file mode 100644 index 000000000000..7da7f0a70b3a --- /dev/null +++ b/vendor/github.com/djherbis/buffer/ring.go @@ -0,0 +1,58 @@ +package buffer + +import ( + "io" + "math" + + "github.com/djherbis/buffer/wrapio" +) + +type ring struct { + BufferAt + L int64 + *wrapio.WrapReader + *wrapio.WrapWriter +} + +// NewRing returns a Ring Buffer from a BufferAt. +// It overwrites old data in the Buffer when needed (when its full). 
+func NewRing(buffer BufferAt) Buffer { + return &ring{ + BufferAt: buffer, + WrapReader: wrapio.NewWrapReader(buffer, 0, buffer.Cap()), + WrapWriter: wrapio.NewWrapWriter(buffer, 0, buffer.Cap()), + } +} + +func (buf *ring) Len() int64 { + return buf.L +} + +func (buf *ring) Cap() int64 { + return math.MaxInt64 +} + +func (buf *ring) Read(p []byte) (n int, err error) { + if buf.L == buf.BufferAt.Cap() { + buf.WrapReader.Seek(buf.WrapWriter.Offset(), 0) + } + n, err = io.LimitReader(buf.WrapReader, buf.L).Read(p) + buf.L -= int64(n) + return n, err +} + +func (buf *ring) Write(p []byte) (n int, err error) { + n, err = buf.WrapWriter.Write(p) + buf.L += int64(n) + if buf.L > buf.BufferAt.Cap() { + buf.L = buf.BufferAt.Cap() + } + return n, err +} + +func (buf *ring) Reset() { + buf.BufferAt.Reset() + buf.L = 0 + buf.WrapReader = wrapio.NewWrapReader(buf.BufferAt, 0, buf.BufferAt.Cap()) + buf.WrapWriter = wrapio.NewWrapWriter(buf.BufferAt, 0, buf.BufferAt.Cap()) +} diff --git a/vendor/github.com/djherbis/buffer/spill.go b/vendor/github.com/djherbis/buffer/spill.go new file mode 100644 index 000000000000..44d618b549f7 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/spill.go @@ -0,0 +1,41 @@ +package buffer + +import ( + "encoding/gob" + "io" + "io/ioutil" + "math" +) + +type spill struct { + Buffer + Spiller io.Writer +} + +// NewSpill returns a Buffer which writes data to w when there's an error +// writing to buf. Such as when buf is full, or the disk is full, etc. 
+func NewSpill(buf Buffer, w io.Writer) Buffer { + if w == nil { + w = ioutil.Discard + } + return &spill{ + Buffer: buf, + Spiller: w, + } +} + +func (buf *spill) Cap() int64 { + return math.MaxInt64 +} + +func (buf *spill) Write(p []byte) (n int, err error) { + if n, err = buf.Buffer.Write(p); err != nil { + m, err := buf.Spiller.Write(p[n:]) + return m + n, err + } + return len(p), nil +} + +func init() { + gob.Register(&spill{}) +} diff --git a/vendor/github.com/djherbis/buffer/swap.go b/vendor/github.com/djherbis/buffer/swap.go new file mode 100644 index 000000000000..bdb11a461a59 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/swap.go @@ -0,0 +1,99 @@ +package buffer + +import ( + "encoding/gob" + "io" +) + +type swap struct { + A BufferAt + B BufferAt +} + +// NewSwap creates a Buffer which writes to a until you write past a.Cap() +// then it io.Copy's from a to b and writes to b. +// Once the Buffer is empty again, it starts over writing to a. +// Note that if b.Cap() <= a.Cap() it will cause a panic, b is expected +// to be larger in order to accommodate writes past a.Cap(). +func NewSwap(a, b Buffer) Buffer { + return NewSwapAt(toBufferAt(a), toBufferAt(b)) +} + +// NewSwapAt creates a BufferAt which writes to a until you write past a.Cap() +// then it io.Copy's from a to b and writes to b. +// Once the Buffer is empty again, it starts over writing to a. +// Note that if b.Cap() <= a.Cap() it will cause a panic, b is expected +// to be larger in order to accommodate writes past a.Cap(). 
+func NewSwapAt(a, b BufferAt) BufferAt { + if b.Cap() <= a.Cap() { + panic("Buffer b must be larger than a.") + } + return &swap{A: a, B: b} +} + +func (buf *swap) Len() int64 { + return buf.A.Len() + buf.B.Len() +} + +func (buf *swap) Cap() int64 { + return buf.B.Cap() +} + +func (buf *swap) Read(p []byte) (n int, err error) { + if buf.A.Len() > 0 { + return buf.A.Read(p) + } + return buf.B.Read(p) +} + +func (buf *swap) ReadAt(p []byte, off int64) (n int, err error) { + if buf.A.Len() > 0 { + return buf.A.ReadAt(p, off) + } + return buf.B.ReadAt(p, off) +} + +func (buf *swap) Write(p []byte) (n int, err error) { + switch { + case buf.B.Len() > 0: + n, err = buf.B.Write(p) + + case buf.A.Len()+int64(len(p)) > buf.A.Cap(): + _, err = io.Copy(buf.B, buf.A) + if err == nil { + n, err = buf.B.Write(p) + } + + default: + n, err = buf.A.Write(p) + } + + return n, err +} + +func (buf *swap) WriteAt(p []byte, off int64) (n int, err error) { + switch { + case buf.B.Len() > 0: + n, err = buf.B.WriteAt(p, off) + + case off+int64(len(p)) > buf.A.Cap(): + _, err = io.Copy(buf.B, buf.A) + if err == nil { + n, err = buf.B.WriteAt(p, off) + } + + default: + n, err = buf.A.WriteAt(p, off) + } + + return n, err +} + +func (buf *swap) Reset() { + buf.A.Reset() + buf.B.Reset() +} + +func init() { + gob.Register(&swap{}) +} diff --git a/vendor/github.com/djherbis/buffer/wrapio/limitwrap.go b/vendor/github.com/djherbis/buffer/wrapio/limitwrap.go new file mode 100644 index 000000000000..7af975027227 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/wrapio/limitwrap.go @@ -0,0 +1,94 @@ +package wrapio + +import ( + "encoding/gob" + "io" + + "github.com/djherbis/buffer/limio" +) + +// ReadWriterAt implements io.ReaderAt and io.WriterAt +type ReadWriterAt interface { + io.ReaderAt + io.WriterAt +} + +// Wrapper implements a io.ReadWriter and ReadWriterAt such that +// when reading/writing goes past N bytes, it "wraps" back to the beginning. 
+type Wrapper struct { + // N is the offset at which to "wrap" back to the start + N int64 + // L is the length of the data written + L int64 + // O is our offset in the data + O int64 + rwa ReadWriterAt +} + +// NewWrapper creates a Wrapper based on ReadWriterAt rwa. +// L is the current length, O is the current offset, and N is offset at which we "wrap". +func NewWrapper(rwa ReadWriterAt, L, O, N int64) *Wrapper { + return &Wrapper{ + L: L, + O: O, + N: N, + rwa: rwa, + } +} + +// Len returns the # of bytes in the Wrapper +func (wpr *Wrapper) Len() int64 { + return wpr.L +} + +// Cap returns the "wrap" offset (max # of bytes) +func (wpr *Wrapper) Cap() int64 { + return wpr.N +} + +// Reset seeks to the start (0 offset), and sets the length to 0. +func (wpr *Wrapper) Reset() { + wpr.O = 0 + wpr.L = 0 +} + +// SetReadWriterAt lets you switch the underlying Read/WriterAt +func (wpr *Wrapper) SetReadWriterAt(rwa ReadWriterAt) { + wpr.rwa = rwa +} + +// Read reads from the current offset into p, wrapping at Cap() +func (wpr *Wrapper) Read(p []byte) (n int, err error) { + n, err = wpr.ReadAt(p, 0) + wpr.L -= int64(n) + wpr.O += int64(n) + wpr.O %= wpr.N + return n, err +} + +// ReadAt reads from the current offset+off into p, wrapping at Cap() +func (wpr *Wrapper) ReadAt(p []byte, off int64) (n int, err error) { + wrap := NewWrapReader(wpr.rwa, wpr.O+off, wpr.N) + r := io.LimitReader(wrap, wpr.L-off) + return r.Read(p) +} + +// Write writes p to the end of the Wrapper (at Len()), wrapping at Cap() +func (wpr *Wrapper) Write(p []byte) (n int, err error) { + return wpr.WriteAt(p, wpr.L) +} + +// WriteAt writes p at the current offset+off, wrapping at Cap() +func (wpr *Wrapper) WriteAt(p []byte, off int64) (n int, err error) { + wrap := NewWrapWriter(wpr.rwa, wpr.O+off, wpr.N) + w := limio.LimitWriter(wrap, wpr.N-off) + n, err = w.Write(p) + if wpr.L < off+int64(n) { + wpr.L = int64(n) + off + } + return n, err +} + +func init() { + gob.Register(&Wrapper{}) +} diff --git 
a/vendor/github.com/djherbis/buffer/wrapio/wrap.go b/vendor/github.com/djherbis/buffer/wrapio/wrap.go new file mode 100644 index 000000000000..b60a6b1392c2 --- /dev/null +++ b/vendor/github.com/djherbis/buffer/wrapio/wrap.go @@ -0,0 +1,139 @@ +package wrapio + +import "io" + +// DoerAt is a common interface for wrappers WriteAt or ReadAt functions +type DoerAt interface { + DoAt([]byte, int64) (int, error) +} + +// DoAtFunc is implemented by ReadAt/WriteAt +type DoAtFunc func([]byte, int64) (int, error) + +type wrapper struct { + off int64 + wrapAt int64 + doat DoAtFunc +} + +func (w *wrapper) Offset() int64 { + return w.off +} + +func (w *wrapper) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + w.off = offset + case 1: + w.off += offset + case 2: + w.off = (w.wrapAt + offset) + } + w.off %= w.wrapAt + return w.off, nil +} + +func (w *wrapper) DoAt(p []byte, off int64) (n int, err error) { + return w.doat(p, off) +} + +// WrapWriter wraps writes around a section of data. +type WrapWriter struct { + *wrapper +} + +// NewWrapWriter creates a WrapWriter starting at offset off, and wrapping at offset wrapAt. +func NewWrapWriter(w io.WriterAt, off int64, wrapAt int64) *WrapWriter { + return &WrapWriter{ + &wrapper{ + doat: w.WriteAt, + off: (off % wrapAt), + wrapAt: wrapAt, + }, + } +} + +// Write writes p starting at the current offset, wrapping when it reaches the end. +// The current offset is shifted forward by the amount written. +func (w *WrapWriter) Write(p []byte) (n int, err error) { + n, err = Wrap(w, p, w.off, w.wrapAt) + w.off = (w.off + int64(n)) % w.wrapAt + return n, err +} + +// WriteAt writes p starting at offset off, wrapping when it reaches the end. +func (w *WrapWriter) WriteAt(p []byte, off int64) (n int, err error) { + return Wrap(w, p, off, w.wrapAt) +} + +// WrapReader wraps reads around a section of data. 
+type WrapReader struct { + *wrapper +} + +// NewWrapReader creates a WrapReader starting at offset off, and wrapping at offset wrapAt. +func NewWrapReader(r io.ReaderAt, off int64, wrapAt int64) *WrapReader { + return &WrapReader{ + &wrapper{ + doat: r.ReadAt, + off: (off % wrapAt), + wrapAt: wrapAt, + }, + } +} + +// Read reads into p starting at the current offset, wrapping if it reaches the end. +// The current offset is shifted forward by the amount read. +func (r *WrapReader) Read(p []byte) (n int, err error) { + n, err = Wrap(r, p, r.off, r.wrapAt) + r.off = (r.off + int64(n)) % r.wrapAt + return n, err +} + +// ReadAt reads into p starting at the current offset, wrapping when it reaches the end. +func (r *WrapReader) ReadAt(p []byte, off int64) (n int, err error) { + return Wrap(r, p, off, r.wrapAt) +} + +// maxConsecutiveEmptyActions determines how many consecutive empty reads/writes can occur before giving up +const maxConsecutiveEmptyActions = 100 + +// Wrap causes an action on an array of bytes (like read/write) to be done from an offset off, +// wrapping at offset wrapAt. 
+func Wrap(w DoerAt, p []byte, off int64, wrapAt int64) (n int, err error) { + var m, fails int + + off %= wrapAt + + for len(p) > 0 { + + if off+int64(len(p)) < wrapAt { + m, err = w.DoAt(p, off) + } else { + space := wrapAt - off + m, err = w.DoAt(p[:space], off) + } + + if err != nil && err != io.EOF { + return n + m, err + } + + switch m { + case 0: + fails++ + default: + fails = 0 + } + + if fails > maxConsecutiveEmptyActions { + return n + m, io.ErrNoProgress + } + + n += m + p = p[m:] + off += int64(m) + off %= wrapAt + } + + return n, err +} diff --git a/vendor/github.com/djherbis/nio/v3/.travis.yml b/vendor/github.com/djherbis/nio/v3/.travis.yml new file mode 100644 index 000000000000..06dc07b8958e --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/.travis.yml @@ -0,0 +1,22 @@ +language: go +go: +- tip +before_install: +- go get -u golang.org/x/lint/golint +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; + fi +script: +- '[ "${TRAVIS_PULL_REQUEST}" != "false" ] || $HOME/gopath/bin/goveralls -service=travis-ci + -repotoken $COVERALLS_TOKEN' +- "$HOME/gopath/bin/golint ./..." +- go vet +- go test -bench=.* -v ./... 
+notifications: + email: + on_success: never + on_failure: change +env: + global: + secure: gpKsimMN5YScLnbcoWvJPw8VL+qCpZgnC4i8mFn/lRX5Ta9FhDMROQre0Ko4bU9RX/u/IBL1fO/IyaVtVWQ0fhsDi+ovrh3LgzewwZBgz7FGiyFpagvf91Jwq5Yus15QQZ8MebrQ41H1YiWMdLOHlZdN6gNb0cswg3w4MRjbGb4= diff --git a/vendor/github.com/djherbis/nio/v3/LICENSE.txt b/vendor/github.com/djherbis/nio/v3/LICENSE.txt new file mode 100644 index 000000000000..f5daa194f780 --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/LICENSE.txt @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dustin H + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/djherbis/nio/v3/README.md b/vendor/github.com/djherbis/nio/v3/README.md new file mode 100644 index 000000000000..8dc316553926 --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/README.md @@ -0,0 +1,65 @@ +nio +========== + +[![GoDoc](https://godoc.org/github.com/djherbis/nio?status.svg)](https://godoc.org/github.com/djherbis/nio) +[![Release](https://img.shields.io/github/release/djherbis/nio.svg)](https://github.com/djherbis/nio/releases/latest) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt) +[![Build Status](https://travis-ci.org/djherbis/nio.svg)](https://travis-ci.org/djherbis/nio) +[![Coverage Status](https://coveralls.io/repos/djherbis/nio/badge.svg?branch=master)](https://coveralls.io/r/djherbis/nio?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/nio)](https://goreportcard.com/report/github.com/djherbis/nio) + +Usage +----- + +The Buffer interface: + +```go +type Buffer interface { + Len() int64 + Cap() int64 + io.ReadWriter +} + +``` + +nio's Copy method concurrently copies from an io.Reader to a supplied nio.Buffer, +then from the nio.Buffer to an io.Writer. This way, blocking writes don't slow the io.Reader. + +```go +import ( + "github.com/djherbis/buffer" + "github.com/djherbis/nio" +) + +buf := buffer.New(32*1024) // 32KB In memory Buffer +nio.Copy(w, r, buf) // Reads and Writes concurrently, buffering using buf. +``` + +nio's Pipe method is a buffered version of io.Pipe +The writer return once its data has been written to the Buffer. +The reader returns with data off the Buffer. 
+ +```go +import ( + "gopkg.in/djherbis/buffer.v1" + "gopkg.in/djherbis/nio.v2" +) + +buf := buffer.New(32*1024) // 32KB In memory Buffer +r, w := nio.Pipe(buf) +``` + +Installation +------------ +```sh +go get gopkg.in/djherbis/nio.v2 +``` + +For some pre-built buffers grab: +```sh +go get gopkg.in/djherbis/buffer.v1 +``` + +Mentions +------------ +[GopherCon 2017: Peter Bourgon - Evolutionary Optimization with Go](https://www.youtube.com/watch?v=ha8gdZ27wMo&start=2077&end=2140) diff --git a/vendor/github.com/djherbis/nio/v3/go.mod b/vendor/github.com/djherbis/nio/v3/go.mod new file mode 100644 index 000000000000..a5f602451e2a --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/djherbis/nio/v3 + +go 1.16 + +require github.com/djherbis/buffer v1.1.0 diff --git a/vendor/github.com/djherbis/nio/v3/go.sum b/vendor/github.com/djherbis/nio/v3/go.sum new file mode 100644 index 000000000000..f4578ade83cf --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/go.sum @@ -0,0 +1,2 @@ +github.com/djherbis/buffer v1.1.0 h1:uGQ+DZDAMlfC2z3khbBtLcAHC0wyoNrX9lpOml3g3fg= +github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o= diff --git a/vendor/github.com/djherbis/nio/v3/nio.go b/vendor/github.com/djherbis/nio/v3/nio.go new file mode 100644 index 000000000000..ab476e62d2f9 --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/nio.go @@ -0,0 +1,53 @@ +// Package nio provides a few buffered io primitives. +package nio + +import "io" + +// Buffer is used to store bytes. +type Buffer interface { + // Len returns how many bytes are buffered + Len() int64 + + // Cap returns how many bytes can in the buffer at a time + Cap() int64 + + // ReadWriter writes are stored in the buffer, reads return the stored data + io.ReadWriter +} + +// Pipe creates a buffered pipe. +// It can be used to connect code expecting an io.Reader with code expecting an io.Writer. +// Reads on one end read from the supplied Buffer. 
Writes write to the supplied Buffer. +// It is safe to call Read and Write in parallel with each other or with Close. +// Close will complete once pending I/O is done, and may cancel blocking Read/Writes. +// Buffered data will still be available to Read after the Writer has been closed. +// Parallel calls to Read, and parallel calls to Write are also safe : +// the individual calls will be gated sequentially. +func Pipe(buf Buffer) (r *PipeReader, w *PipeWriter) { + p := newBufferedPipe(buf) + r = &PipeReader{bufpipe: p} + w = &PipeWriter{bufpipe: p} + return r, w +} + +// Copy copies from src to buf, and from buf to dst in parallel until +// either EOF is reached on src or an error occurs. It returns the number of bytes +// copied to dst and the first error encountered while copying, if any. +// EOF is not considered to be an error. If src implements WriterTo, it is used to +// write to the supplied Buffer. If dst implements ReaderFrom, it is used to read from +// the supplied Buffer. +func Copy(dst io.Writer, src io.Reader, buf Buffer) (n int64, err error) { + return io.Copy(dst, NewReader(src, buf)) +} + +// NewReader reads from the buffer which is concurrently filled with data from the passed src. +func NewReader(src io.Reader, buf Buffer) io.ReadCloser { + r, w := Pipe(buf) + + go func() { + _, err := io.Copy(w, src) + w.CloseWithError(err) + }() + + return r +} diff --git a/vendor/github.com/djherbis/nio/v3/sync.go b/vendor/github.com/djherbis/nio/v3/sync.go new file mode 100644 index 000000000000..fec538381a60 --- /dev/null +++ b/vendor/github.com/djherbis/nio/v3/sync.go @@ -0,0 +1,177 @@ +package nio + +import ( + "io" + "sync" +) + +// PipeReader is the read half of the pipe. +type PipeReader struct { + *bufpipe +} + +// CloseWithError closes the reader; subsequent writes to the write half of the pipe will return the error err. 
+func (r *PipeReader) CloseWithError(err error) error { + if err == nil { + err = io.ErrClosedPipe + } + r.bufpipe.l.Lock() + defer r.bufpipe.l.Unlock() + if r.bufpipe.rerr == nil { + r.bufpipe.rerr = err + r.bufpipe.rwait.Signal() + r.bufpipe.wwait.Signal() + } + return nil +} + +// Close closes the reader; subsequent writes to the write half of the pipe will return the error io.ErrClosedPipe. +func (r *PipeReader) Close() error { + return r.CloseWithError(nil) +} + +// A PipeWriter is the write half of a pipe. +type PipeWriter struct { + *bufpipe +} + +// CloseWithError closes the writer; once the buffer is empty subsequent reads from the read half of the pipe will return +// no bytes and the error err, or io.EOF if err is nil. CloseWithError always returns nil. +func (w *PipeWriter) CloseWithError(err error) error { + if err == nil { + err = io.EOF + } + w.bufpipe.l.Lock() + defer w.bufpipe.l.Unlock() + if w.bufpipe.werr == nil { + w.bufpipe.werr = err + w.bufpipe.rwait.Signal() + w.bufpipe.wwait.Signal() + } + return nil +} + +// Close closes the writer; once the buffer is empty subsequent reads from the read half of the pipe will return +// no bytes and io.EOF after all the buffer has been read. CloseWithError always returns nil. 
+func (w *PipeWriter) Close() error { + return w.CloseWithError(nil) +} + +type bufpipe struct { + rl sync.Mutex + wl sync.Mutex + l sync.Mutex + rwait sync.Cond + wwait sync.Cond + b Buffer + rerr error // if reader closed, error to give writes + werr error // if writer closed, error to give reads +} + +func newBufferedPipe(buf Buffer) *bufpipe { + s := &bufpipe{ + b: buf, + } + s.rwait.L = &s.l + s.wwait.L = &s.l + return s +} + +func empty(buf Buffer) bool { + return buf.Len() == 0 +} + +func gap(buf Buffer) int64 { + return buf.Cap() - buf.Len() +} + +func (r *PipeReader) Read(p []byte) (n int, err error) { + r.rl.Lock() + defer r.rl.Unlock() + + r.l.Lock() + defer r.wwait.Signal() + defer r.l.Unlock() + + for empty(r.b) { + if r.rerr != nil { + return 0, io.ErrClosedPipe + } + + if r.werr != nil { + return 0, r.werr + } + + r.wwait.Signal() + r.rwait.Wait() + } + + n, err = r.b.Read(p) + if err == io.EOF { + err = nil + } + + return n, err +} + +func (w *PipeWriter) Write(p []byte) (int, error) { + var m int + var n, space int64 + var err error + sliceLen := int64(len(p)) + + w.wl.Lock() + defer w.wl.Unlock() + + w.l.Lock() + defer w.rwait.Signal() + defer w.l.Unlock() + + if w.werr != nil { + return 0, io.ErrClosedPipe + } + + // while there is data to write + for writeLen := sliceLen; writeLen > 0 && err == nil; writeLen = sliceLen - n { + + // wait for some buffer space to become available (while no errs) + for space = gap(w.b); space == 0 && w.rerr == nil && w.werr == nil; space = gap(w.b) { + w.rwait.Signal() + w.wwait.Wait() + } + + if w.rerr != nil { + err = w.rerr + break + } + + if w.werr != nil { + err = io.ErrClosedPipe + break + } + + // space > 0, and locked + + var nn int64 + if space < writeLen { + // => writeLen - space > 0 + // => (sliceLen - n) - space > 0 + // => sliceLen > n + space + // nn is safe to use for p[:nn] + nn = n + space + } else { + nn = sliceLen + } + + m, err = w.b.Write(p[n:nn]) + n += int64(m) + + // one of the following 
cases has occurred: + // 1. done writing -> writeLen == 0 + // 2. ran out of buffer space -> gap(w.b) == 0 + // 3. an error occurred err != nil + // all of these cases are handled at the top of this loop + } + + return int(n), err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c971bcd26832..6949fdb3f281 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -231,6 +231,14 @@ github.com/denisenkom/go-mssqldb/internal/querytext github.com/dgrijalva/jwt-go # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f github.com/dgryski/go-rendezvous +# github.com/djherbis/buffer v1.2.0 +## explicit +github.com/djherbis/buffer +github.com/djherbis/buffer/limio +github.com/djherbis/buffer/wrapio +# github.com/djherbis/nio/v3 v3.0.1 +## explicit +github.com/djherbis/nio/v3 # github.com/dlclark/regexp2 v1.4.0 github.com/dlclark/regexp2 github.com/dlclark/regexp2/syntax