This commit is contained in:
Chongyi Zheng 2024-04-26 19:03:43 -04:00 committed by GitHub
commit 6d1cf0b7a4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
35 changed files with 1848 additions and 327 deletions

View File

@ -135,7 +135,7 @@ func runRepoSyncReleases(_ *cli.Context) error {
}
log.Trace(" currentNumReleases is %d, running SyncReleasesWithTags", oldnum)
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo, false); err != nil {
log.Warn(" SyncReleasesWithTags: %v", err)
gitRepo.Close()
continue

View File

@ -228,6 +228,7 @@ type Comment struct {
Poster *user_model.User `xorm:"-"`
OriginalAuthor string
OriginalAuthorID int64
OriginalID int64 // Only used in synced comments
IssueID int64 `xorm:"INDEX"`
Issue *Issue `xorm:"-"`
LabelID int64
@ -1305,3 +1306,77 @@ func InsertIssueComments(ctx context.Context, comments []*Comment) error {
}
return committer.Commit()
}
// UpsertIssueComments inserts many comments of issues.
func UpsertIssueComments(ctx context.Context, comments []*Comment) error {
if len(comments) == 0 {
return nil
}
issueIDs := make(map[int64]bool)
for _, comment := range comments {
issueIDs[comment.IssueID] = true
}
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
for _, comment := range comments {
exists, err := sess.Exist(&Comment{
IssueID: comment.IssueID,
OriginalID: comment.OriginalID,
})
if err != nil {
return err
}
if !exists {
if _, err := sess.NoAutoTime().Insert(comment); err != nil {
return err
}
} else {
if _, err := sess.NoAutoTime().Where(
"issue_id = ? AND original_id = ?", comment.IssueID, comment.OriginalID,
).AllCols().Update(comment); err != nil {
return err
}
}
for _, reaction := range comment.Reactions {
reaction.IssueID = comment.IssueID
reaction.CommentID = comment.ID
}
if len(comment.Reactions) > 0 {
for _, reaction := range comment.Reactions {
// issue comment reaction is uniquely identified by issue_id, comment_id and type
exists, err := sess.Exist(&Reaction{
IssueID: reaction.IssueID,
CommentID: reaction.CommentID,
Type: reaction.Type,
})
if err != nil {
return err
}
if exists {
if _, err := sess.Where(
"issue_id = ? AND comment_id = ? AND type = ?",
reaction.IssueID, reaction.CommentID, reaction.Type,
).AllCols().Update(&reaction); err != nil {
return err
}
} else {
if _, err := sess.Insert(&reaction); err != nil {
return err
}
}
}
}
}
for issueID := range issueIDs {
if _, err := db.Exec(ctx, "UPDATE issue SET num_comments = (SELECT count(*) FROM comment WHERE issue_id = ? AND `type`=?) WHERE id = ?",
issueID, CommentTypeComment, issueID); err != nil {
return err
}
}
return nil
})
}

View File

@ -909,3 +909,81 @@ func insertIssue(ctx context.Context, issue *Issue) error {
return nil
}
// UpsertIssues creates new issues and updates existing issues in database
func UpsertIssues(ctx context.Context, issues ...*Issue) error {
return db.WithTx(ctx, func(ctx context.Context) error {
for _, issue := range issues {
if _, err := upsertIssue(ctx, issue); err != nil {
return err
}
}
return nil
})
}
func upsertIssue(ctx context.Context, issue *Issue) (isInsert bool, err error) {
sess := db.GetEngine(ctx)
has, err := sess.Table("issue").Where("repo_id = ? AND `index` = ?", issue.RepoID, issue.Index).Cols("id").Get(&issue.ID)
if err != nil {
return false, err
}
if !has {
return true, insertIssue(ctx, issue)
}
return false, updateIssue(ctx, issue)
}
func updateIssue(ctx context.Context, issue *Issue) error {
sess := db.GetEngine(ctx)
if _, err := sess.NoAutoTime().ID(issue.ID).AllCols().Update(issue); err != nil {
return err
}
issueLabels := resolveIssueLabels(issue.ID, issue.Labels)
if len(issueLabels) > 0 {
// delete old labels
if _, err := sess.Table("issue_label").Where("issue_id = ?", issue.ID).Delete(); err != nil {
return err
}
// insert new labels
if _, err := sess.Insert(issueLabels); err != nil {
return err
}
}
for _, reaction := range issue.Reactions {
reaction.IssueID = issue.ID
}
if len(issue.Reactions) > 0 {
// update existing reactions and insert new ones
for _, reaction := range issue.Reactions {
exists, err := sess.Exist(&Reaction{ID: reaction.ID})
if err != nil {
return err
}
if exists {
if _, err := sess.ID(reaction.ID).AllCols().Update(&reaction); err != nil {
return err
}
} else {
if _, err := sess.Insert(&reaction); err != nil {
return err
}
}
}
}
return nil
}
func resolveIssueLabels(issueID int64, labels []*Label) []IssueLabel {
issueLabels := make([]IssueLabel, 0, len(labels))
for _, label := range labels {
issueLabels = append(issueLabels, IssueLabel{
IssueID: issueID,
LabelID: label.ID,
})
}
return issueLabels
}

View File

@ -92,6 +92,8 @@ type Label struct {
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
OriginalID int64 // Only for migrating data from other system, used for syncing
NumOpenIssues int `xorm:"-"`
NumOpenRepoIssues int64 `xorm:"-"`
IsChecked bool `xorm:"-"`
@ -390,6 +392,69 @@ func GetLabelsByRepoID(ctx context.Context, repoID int64, sortType string, listO
return labels, sess.Find(&labels)
}
// UpdateLabelsByRepoID adds, updates, and deletes relevant labels for the given repository.
func UpdateLabelsByRepoID(ctx context.Context, repoID int64, labels ...*Label) error {
return db.WithTx(ctx, func(ctx context.Context) error {
existingLabels, err := GetLabelsByRepoID(ctx, repoID, "", db.ListOptions{})
if err != nil {
return err
}
labelsToAdd := make([]*Label, 0)
labelsToUpdate := make([]*Label, 0)
labelsToDelete := make([]*Label, 0)
for _, l := range labels {
var foundLabel *Label
for _, existingLabel := range existingLabels {
if existingLabel.OriginalID == l.OriginalID {
foundLabel = existingLabel
break
}
}
if foundLabel == nil {
labelsToAdd = append(labelsToAdd, l)
} else if foundLabel.Name != l.Name || foundLabel.Description != l.Description ||
foundLabel.Color != l.Color {
l.RepoID = repoID
labelsToUpdate = append(labelsToUpdate, l)
}
}
for _, existingLabel := range existingLabels {
found := false
for _, label := range labels {
if label.OriginalID == existingLabel.OriginalID {
found = true
break
}
}
if !found {
labelsToDelete = append(labelsToDelete, existingLabel)
}
}
for _, l := range labelsToAdd {
if err = NewLabel(ctx, l); err != nil {
return err
}
}
for _, l := range labelsToUpdate {
if err = UpdateLabel(ctx, l); err != nil {
return err
}
}
for _, l := range labelsToDelete {
if err = DeleteLabel(ctx, repoID, l.ID); err != nil {
return err
}
}
return nil
})
}
// CountLabelsByRepoID count number of all labels that belong to given repository by ID.
func CountLabelsByRepoID(ctx context.Context, repoID int64) (int64, error) {
return db.GetEngine(ctx).Where("repo_id = ?", repoID).Count(&Label{})

View File

@ -65,6 +65,8 @@ type Milestone struct {
DeadlineString string `xorm:"-"`
TotalTrackedTime int64 `xorm:"-"`
OriginalID int64 // ID from the upstream syncing source
}
func init() {
@ -380,3 +382,85 @@ func InsertMilestones(ctx context.Context, ms ...*Milestone) (err error) {
}
return committer.Commit()
}
// UpdateMilestones updates milestones of repository.
func UpdateMilestones(ctx context.Context, ms ...*Milestone) (err error) {
if len(ms) == 0 {
return nil
}
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
// get existing milestones
existingMilestones := make([]*Milestone, 0)
if err = sess.Where("repo_id = ?", ms[0].RepoID).Find(&existingMilestones); err != nil {
return err
}
milestonesToAdd := make([]*Milestone, 0)
milestonesToUpdate := make([]*Milestone, 0)
milestonesToDelete := make([]*Milestone, 0)
foundMap := make(map[int64]bool)
openCount := 0
closedCount := 0
for _, m := range ms {
var foundMilestone *Milestone
for _, existingMilestone := range existingMilestones {
if existingMilestone.OriginalID == m.OriginalID {
foundMilestone = existingMilestone
foundMap[existingMilestone.ID] = true
break
}
}
if foundMilestone == nil {
milestonesToAdd = append(milestonesToAdd, m)
} else if foundMilestone.OriginalID != m.OriginalID {
m.ID = foundMilestone.ID
milestonesToUpdate = append(milestonesToUpdate, m)
}
if m.IsClosed {
closedCount++
} else {
openCount++
}
}
for _, existingMilestone := range existingMilestones {
if _, exist := foundMap[existingMilestone.ID]; !exist {
milestonesToDelete = append(milestonesToDelete, existingMilestone)
}
}
if len(milestonesToAdd) > 0 {
if _, err = sess.Insert(milestonesToAdd); err != nil {
return err
}
}
for _, m := range milestonesToUpdate {
if _, err = sess.ID(m.ID).AllCols().Update(m); err != nil {
return err
}
}
for _, m := range milestonesToDelete {
if _, err = sess.ID(m.ID).Delete(m); err != nil {
return err
}
}
if _, err = sess.ID(ms[0].RepoID).Update(&repo_model.Repository{
NumMilestones: len(ms),
NumOpenMilestones: openCount,
NumClosedMilestones: closedCount,
}); err != nil {
return err
}
return nil
})
}

View File

@ -1046,6 +1046,35 @@ func InsertPullRequests(ctx context.Context, prs ...*PullRequest) error {
return committer.Commit()
}
// UpsertPullRequests inserts new pull requests and updates existing pull requests in database
func UpsertPullRequests(ctx context.Context, prs ...*PullRequest) error {
if len(prs) == 0 {
return nil
}
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
for _, pr := range prs {
isInsert, err := upsertIssue(ctx, pr.Issue)
if err != nil {
return err
}
pr.IssueID = pr.Issue.ID
if isInsert {
if _, err := sess.NoAutoTime().Insert(pr); err != nil {
return err
}
} else {
if _, err := sess.NoAutoTime().ID(pr.ID).AllCols().Update(pr); err != nil {
return err
}
}
}
return nil
})
}
// GetPullRequestByMergedCommit returns a merged pull request by the given commit
func GetPullRequestByMergedCommit(ctx context.Context, repoID int64, sha string) (*PullRequest, error) {
pr := new(PullRequest)

View File

@ -128,6 +128,7 @@ type Review struct {
ReviewerTeam *organization.Team `xorm:"-"`
OriginalAuthor string
OriginalAuthorID int64
OriginalID int64 // ID from the upstream syncing source
Issue *Issue `xorm:"-"`
IssueID int64 `xorm:"index"`
Content string `xorm:"TEXT"`
@ -591,17 +592,7 @@ func InsertReviews(ctx context.Context, reviews []*Review) error {
return err
}
if _, err := sess.NoAutoTime().Insert(&Comment{
Type: CommentTypeReview,
Content: review.Content,
PosterID: review.ReviewerID,
OriginalAuthor: review.OriginalAuthor,
OriginalAuthorID: review.OriginalAuthorID,
IssueID: review.IssueID,
ReviewID: review.ID,
CreatedUnix: review.CreatedUnix,
UpdatedUnix: review.UpdatedUnix,
}); err != nil {
if _, err := sess.NoAutoTime().Insert(generateCommentFromReview(review)); err != nil {
return err
}
@ -619,6 +610,109 @@ func InsertReviews(ctx context.Context, reviews []*Review) error {
return committer.Commit()
}
// UpsertReviews inserts new reviews and updates existing ones.
// This function is used for syncing from the pull mirror.
func UpsertReviews(ctx context.Context, reviews []*Review) error {
return db.WithTx(ctx, func(ctx context.Context) error {
sess := db.GetEngine(ctx)
for _, review := range reviews {
exists, err := sess.Where("original_id = ?", review.OriginalID).Exist(&Review{})
if err != nil {
return err
}
if !exists {
if _, err := sess.NoAutoTime().Insert(review); err != nil {
return err
}
if _, err := sess.NoAutoTime().Insert(generateCommentFromReview(review)); err != nil {
return err
}
for _, c := range review.Comments {
c.ReviewID = review.ID
}
if len(review.Comments) > 0 {
if _, err := sess.NoAutoTime().Insert(review.Comments); err != nil {
return err
}
}
} else {
if _, err = sess.NoAutoTime().Where("original_id = ?", review.OriginalID).Update(review); err != nil {
return err
}
// Get id of the review
if err = sess.NoAutoTime().Where("original_id = ?", review.OriginalID).Find(review); err != nil {
return err
}
comment := generateCommentFromReview(review)
exists, err := existsCommentByReviewIDAndCreatedUnix(sess, comment)
if err != nil {
return err
}
if !exists {
if _, err := sess.NoAutoTime().Insert(comment); err != nil {
return err
}
} else {
if _, err := sess.NoAutoTime().Where("original_id = ?", comment.OriginalID).Update(comment); err != nil {
return err
}
}
for _, c := range review.Comments {
c.ReviewID = review.ID
}
if len(review.Comments) > 0 {
for _, comment := range review.Comments {
exists, err := existsCommentByReviewIDAndCreatedUnix(sess, comment)
if err != nil {
return err
}
if !exists {
if _, err := sess.NoAutoTime().Insert(comment); err != nil {
return err
}
} else {
if _, err := sess.NoAutoTime().Where("original_id = ?", comment.OriginalID).Update(comment); err != nil {
return err
}
}
}
}
}
}
return nil
})
}
func existsCommentByReviewIDAndCreatedUnix(sess db.Engine, comment *Comment) (bool, error) {
return sess.Where("original_id = ?", comment.OriginalID).Exist(&Comment{})
}
func generateCommentFromReview(review *Review) *Comment {
return &Comment{
Type: CommentTypeReview,
Content: review.Content,
PosterID: review.ReviewerID,
OriginalAuthor: review.OriginalAuthor,
OriginalAuthorID: review.OriginalAuthorID,
IssueID: review.IssueID,
ReviewID: review.ID,
CreatedUnix: review.CreatedUnix,
UpdatedUnix: review.UpdatedUnix,
OriginalID: review.OriginalID,
}
}
// AddReviewRequest add a review request from one reviewer
func AddReviewRequest(ctx context.Context, issue *Issue, reviewer, doer *user_model.User) (*Comment, error) {
ctx, committer, err := db.TxContext(ctx)

View File

@ -25,6 +25,14 @@ type Mirror struct {
Interval time.Duration
EnablePrune bool `xorm:"NOT NULL DEFAULT true"`
SyncWiki bool `xorm:"NOT NULL DEFAULT false"`
SyncIssues bool `xorm:"NOT NULL DEFAULT false"`
SyncMilestones bool `xorm:"NOT NULL DEFAULT false"`
SyncLabels bool `xorm:"NOT NULL DEFAULT false"`
SyncReleases bool `xorm:"NOT NULL DEFAULT false"`
SyncComments bool `xorm:"NOT NULL DEFAULT false"`
SyncPullRequests bool `xorm:"NOT NULL DEFAULT false"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX"`
NextUpdateUnix timeutil.TimeStamp `xorm:"INDEX"`

View File

@ -17,6 +17,7 @@ import (
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
"code.gitea.io/gitea/modules/optional"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
@ -563,3 +564,74 @@ func InsertReleases(ctx context.Context, rels ...*Release) error {
return committer.Commit()
}
// UpsertReleases inserts new releases and updates existing releases
func UpsertReleases(ctx context.Context, rels ...*Release) error {
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return err
}
defer committer.Close()
sess := db.GetEngine(ctx)
for _, rel := range rels {
exists, err := sess.Where("repo_id = ? AND tag_name = ?", rel.RepoID, rel.TagName).Exist(&Release{})
if err != nil {
return err
}
if !exists {
if _, err := sess.NoAutoTime().Insert(rel); err != nil {
return err
}
if len(rel.Attachments) > 0 {
for i := range rel.Attachments {
rel.Attachments[i].ReleaseID = rel.ID
}
if _, err := sess.NoAutoTime().Insert(rel.Attachments); err != nil {
return err
}
}
} else {
if _, err := sess.NoAutoTime().
Where("repo_id = ? AND tag_name = ?", rel.RepoID, rel.TagName).
AllCols().Update(rel); err != nil {
return err
}
if len(rel.Attachments) > 0 {
for i := range rel.Attachments {
rel.Attachments[i].ReleaseID = rel.ID
}
var existingReleases []*Attachment
err := sess.Where("release_id = ?", rel.ID).Find(&existingReleases)
if err != nil {
return err
}
if _, err := sess.NoAutoTime().Insert(rel.Attachments); err != nil {
return err
}
var ids []int64
for _, existingRelease := range existingReleases {
// TODO: file operations are not atomic, so errors should be handled
err = storage.Attachments.Delete(existingRelease.RelativePath())
if err != nil {
return err
}
ids = append(ids, existingRelease.ID)
}
if _, err := sess.NoAutoTime().In("id", ids).Delete(&Attachment{}); err != nil {
return err
}
}
}
}
return committer.Commit()
}

View File

@ -587,12 +587,18 @@ func (repo *Repository) CanCreateBranch() bool {
// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
func (repo *Repository) CanEnablePulls() bool {
return !repo.IsMirror && !repo.IsEmpty
return !repo.IsEmpty
}
// AllowsPulls returns true if repository meets the requirements of accepting pulls and has them enabled.
func (repo *Repository) AllowsPulls(ctx context.Context) bool {
return repo.CanEnablePulls() && repo.UnitEnabled(ctx, unit.TypePullRequests)
return repo.CanEnablePulls() && !repo.IsMirror && repo.UnitEnabled(ctx, unit.TypePullRequests)
}
// AllowsIssues returns true if repository meets the requirements of accepting issues and has them enabled.
func (repo *Repository) AllowsIssues(ctx context.Context) bool {
// TODO: disable only when issues are synced
return !repo.IsMirror && repo.UnitEnabled(ctx, unit.TypePullRequests)
}
// CanEnableEditor returns true if repository meets the requirements of web editor.

View File

@ -255,6 +255,34 @@ func AddTopic(ctx context.Context, repoID int64, topicName string) (*Topic, erro
return topic, committer.Commit()
}
func AddTopics(repoID int64, topicNames ...string) error {
ctx, committer, err := db.TxContext(db.DefaultContext)
if err != nil {
return err
}
defer committer.Close()
sess := db.GetEngine(ctx)
for _, topicName := range topicNames {
if strings.TrimSpace(topicName) == "" {
continue
}
_, err := addTopicByNameToRepo(ctx, repoID, topicName)
if err != nil {
return err
}
}
if _, err := sess.ID(repoID).Cols("topics").Update(&Repository{
Topics: topicNames,
}); err != nil {
return err
}
return committer.Commit()
}
// DeleteTopic removes a topic name from a repository (if it has it)
func DeleteTopic(ctx context.Context, repoID int64, topicName string) (*Topic, error) {
topic, err := GetRepoTopicByName(ctx, repoID, topicName)
@ -276,7 +304,7 @@ func DeleteTopic(ctx context.Context, repoID int64, topicName string) (*Topic, e
return topic, err
}
// SaveTopics save topics to a repository
// SaveTopics save topics to a repository (add and delete respective topics)
func SaveTopics(ctx context.Context, repoID int64, topicNames ...string) error {
topics, err := db.Find[Topic](ctx, &FindTopicOptions{
RepoID: repoID,

View File

@ -24,7 +24,8 @@ type Comment struct {
Updated time.Time
Content string
Reactions []*Reaction
Meta map[string]any `yaml:"meta,omitempty"` // see models/issues/comment.go for fields in Comment struct
Meta map[string]interface{} `yaml:"meta,omitempty"` // see models/issues/comment.go for fields in Comment struct
OriginalID int64 `yaml:"-"` // ID from the upstream syncing source
}
// GetExternalName ExternalUserMigrated interface

View File

@ -6,6 +6,7 @@ package migration
import (
"context"
"time"
"code.gitea.io/gitea/modules/structs"
)
@ -25,6 +26,14 @@ type Downloader interface {
GetPullRequests(page, perPage int) ([]*PullRequest, bool, error)
GetReviews(reviewable Reviewable) ([]*Review, error)
FormatCloneURL(opts MigrateOptions, remoteAddr string) (string, error)
// For syncing issues and pull requests
SupportSyncing() bool
GetNewIssues(page, perPage int, updatedAfter time.Time) ([]*Issue, bool, error)
GetNewComments(commentable Commentable, updatedAfter time.Time) ([]*Comment, bool, error)
GetAllNewComments(page, perPage int, updatedAfter time.Time) ([]*Comment, bool, error)
GetNewPullRequests(page, perPage int, updatedAfter time.Time) ([]*PullRequest, bool, error)
GetNewReviews(reviewable Reviewable, updatedAfter time.Time) ([]*Review, error)
}
// DownloaderFactory defines an interface to match a downloader implementation and create a downloader

View File

@ -10,4 +10,5 @@ type Label struct {
Color string `json:"color"`
Description string `json:"description"`
Exclusive bool `json:"exclusive"`
OriginalID int64 `json:"-"` // ID from the upstream syncing source
}

View File

@ -15,4 +15,5 @@ type Milestone struct {
Updated *time.Time `json:"updated"`
Closed *time.Time `json:"closed"`
State string `json:"state"` // open, closed
OriginalID int64 `json:"-"` // ID from the upstream syncing source
}

View File

@ -6,6 +6,7 @@ package migration
import (
"context"
"net/url"
"time"
)
// NullDownloader implements a blank downloader
@ -86,3 +87,33 @@ func (n NullDownloader) FormatCloneURL(opts MigrateOptions, remoteAddr string) (
func (n NullDownloader) SupportGetRepoComments() bool {
return false
}
// SupportSyncing returns true if it supports syncing issues/PRs/etc from pull mirror
func (n NullDownloader) SupportSyncing() bool {
return false
}
// GetNewIssues returns new issues updated after the given time according start and limit
func (n NullDownloader) GetNewIssues(page, perPage int, updatedAfter time.Time) ([]*Issue, bool, error) {
return nil, false, ErrNotSupported{Entity: "NewIssues"}
}
// GetNewComments returns comments of an issue or PR after the given time
func (n NullDownloader) GetNewComments(commentable Commentable, updatedAfter time.Time) ([]*Comment, bool, error) {
return nil, false, ErrNotSupported{Entity: "NewComments"}
}
// GetAllNewComments returns paginated comments after the given time
func (n NullDownloader) GetAllNewComments(page, perPage int, updatedAfter time.Time) ([]*Comment, bool, error) {
return nil, false, ErrNotSupported{Entity: "AllNewComments"}
}
// GetNewPullRequests returns pull requests after the given time according page and perPage
func (n NullDownloader) GetNewPullRequests(page, perPage int, updatedAfter time.Time) ([]*PullRequest, bool, error) {
return nil, false, ErrNotSupported{Entity: "NewPullRequests"}
}
// GetNewReviews returns new pull requests review after the given time
func (n NullDownloader) GetNewReviews(reviewable Reviewable, updatedAfter time.Time) ([]*Review, error) {
return nil, ErrNotSupported{Entity: "NewReviews"}
}

View File

@ -0,0 +1,95 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package migration
// NullUploader implements a blank uploader
type NullUploader struct{}
var _ Uploader = &NullUploader{}
func (g *NullUploader) MaxBatchInsertSize(tp string) int {
return 0
}
func (g *NullUploader) CreateRepo(repo *Repository, opts MigrateOptions) error {
return nil
}
func (g *NullUploader) CreateTopics(topic ...string) error {
return nil
}
func (g *NullUploader) CreateMilestones(milestones ...*Milestone) error {
return nil
}
func (g *NullUploader) CreateReleases(releases ...*Release) error {
return nil
}
func (g *NullUploader) SyncTags() error {
return nil
}
func (g *NullUploader) CreateLabels(labels ...*Label) error {
return nil
}
func (g *NullUploader) CreateIssues(issues ...*Issue) error {
return nil
}
func (g *NullUploader) CreateComments(comments ...*Comment) error {
return nil
}
func (g *NullUploader) CreatePullRequests(prs ...*PullRequest) error {
return nil
}
func (g *NullUploader) CreateReviews(reviews ...*Review) error {
return nil
}
func (g *NullUploader) UpdateTopics(topic ...string) error {
return nil
}
func (g *NullUploader) UpdateMilestones(milestones ...*Milestone) error {
return nil
}
func (g *NullUploader) UpdateLabels(labels ...*Label) error {
return nil
}
func (g *NullUploader) PatchReleases(releases ...*Release) error {
return nil
}
func (g *NullUploader) PatchIssues(issues ...*Issue) error {
return nil
}
func (g *NullUploader) PatchComments(comments ...*Comment) error {
return nil
}
func (g *NullUploader) PatchPullRequests(prs ...*PullRequest) error {
return nil
}
func (g *NullUploader) PatchReviews(reviews ...*Review) error {
return nil
}
func (g *NullUploader) Rollback() error {
return nil
}
func (g *NullUploader) Finish() error {
return nil
}
func (g *NullUploader) Close() {}

View File

@ -37,6 +37,7 @@ type Release struct {
Assets []*ReleaseAsset
Created time.Time
Published time.Time
OriginalID int64 `yaml:"-"` // ID from the upstream syncing source
}
// GetExternalName ExternalUserMigrated interface

View File

@ -42,6 +42,7 @@ type Review struct {
CreatedAt time.Time `yaml:"created_at"`
State string // PENDING, APPROVED, REQUEST_CHANGES, or COMMENT
Comments []*ReviewComment
OriginalID int64 // ID from the upstream syncing source
}
// GetExternalName ExternalUserMigrated interface
@ -52,16 +53,17 @@ func (r *Review) GetExternalID() int64 { return r.ReviewerID }
// ReviewComment represents a review comment
type ReviewComment struct {
ID int64
InReplyTo int64 `yaml:"in_reply_to"`
Content string
TreePath string `yaml:"tree_path"`
DiffHunk string `yaml:"diff_hunk"`
Position int
Line int
CommitID string `yaml:"commit_id"`
PosterID int64 `yaml:"poster_id"`
Reactions []*Reaction
CreatedAt time.Time `yaml:"created_at"`
UpdatedAt time.Time `yaml:"updated_at"`
ID int64
InReplyTo int64 `yaml:"in_reply_to"`
Content string
TreePath string `yaml:"tree_path"`
DiffHunk string `yaml:"diff_hunk"`
Position int
Line int
CommitID string `yaml:"commit_id"`
PosterID int64 `yaml:"poster_id"`
Reactions []*Reaction
CreatedAt time.Time `yaml:"created_at"`
UpdatedAt time.Time `yaml:"updated_at"`
OriginalID int64 // ID from the upstream syncing source
}

View File

@ -8,7 +8,7 @@ package migration
type Uploader interface {
MaxBatchInsertSize(tp string) int
CreateRepo(repo *Repository, opts MigrateOptions) error
CreateTopics(topic ...string) error
CreateTopics(topics ...string) error
CreateMilestones(milestones ...*Milestone) error
CreateReleases(releases ...*Release) error
SyncTags() error
@ -17,6 +17,14 @@ type Uploader interface {
CreateComments(comments ...*Comment) error
CreatePullRequests(prs ...*PullRequest) error
CreateReviews(reviews ...*Review) error
UpdateTopics(topics ...string) error // update topics of a repository, and delete those that are not in the list
UpdateMilestones(milestones ...*Milestone) error // update milestones of a repository, and delete those that are not in the list
UpdateLabels(labels ...*Label) error // rewrite all issue labels and delete those that are not in the list
PatchReleases(releases ...*Release) error // add or update releases (no deletes)
PatchComments(comments ...*Comment) error // add or update comments (no deletes)
PatchIssues(issues ...*Issue) error // add or update issues (no deletes)
PatchPullRequests(prs ...*PullRequest) error // add or update pull requests (no deletes)
PatchReviews(reviews ...*Review) error // add or update reviews (no deletes)
Rollback() error
Finish() error
Close()

View File

@ -5,13 +5,16 @@ package repository
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.gitea.io/gitea/models/db"
git_model "code.gitea.io/gitea/models/git"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/container"
@ -19,8 +22,10 @@ import (
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migration"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
)
/*
@ -42,6 +47,251 @@ func WikiRemoteURL(ctx context.Context, remote string) string {
return ""
}
// MigrateRepositoryGitData starts migrating git related data after created migrating repository
func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
repo *repo_model.Repository, opts migration.MigrateOptions,
httpTransport *http.Transport,
) (*repo_model.Repository, error) {
repoPath := repo_model.RepoPath(u.Name, opts.RepoName)
if u.IsOrganization() {
t, err := organization.OrgFromUser(u).GetOwnerTeam(ctx)
if err != nil {
return nil, err
}
repo.NumWatches = t.NumMembers
} else {
repo.NumWatches = 1
}
migrateTimeout := time.Duration(setting.Git.Timeout.Migrate) * time.Second
var err error
if err = util.RemoveAll(repoPath); err != nil {
return repo, fmt.Errorf("failed to remove %s: %w", repoPath, err)
}
if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
}); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return repo, fmt.Errorf("clone timed out. Consider increasing [git.timeout] MIGRATE in app.ini. Underlying Error: %w", err)
}
return repo, fmt.Errorf("clone: %w", err)
}
if err := git.WriteCommitGraph(ctx, repoPath); err != nil {
return repo, err
}
if opts.Wiki {
wikiPath := repo_model.WikiPath(u.Name, opts.RepoName)
wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr)
if len(wikiRemotePath) > 0 {
if err := util.RemoveAll(wikiPath); err != nil {
return repo, fmt.Errorf("failed to remove %s: %w", wikiPath, err)
}
if err := git.Clone(ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: migrateTimeout,
Branch: "master",
SkipTLSVerify: setting.Migrations.SkipTLSVerify,
}); err != nil {
log.Warn("Clone wiki: %v", err)
if err := util.RemoveAll(wikiPath); err != nil {
return repo, fmt.Errorf("failed to remove %s: %w", wikiPath, err)
}
} else {
if err := git.WriteCommitGraph(ctx, wikiPath); err != nil {
return repo, err
}
}
}
}
if repo.OwnerID == u.ID {
repo.Owner = u
}
if err = CheckDaemonExportOK(ctx, repo); err != nil {
return repo, fmt.Errorf("checkDaemonExportOK: %w", err)
}
if stdout, _, err := git.NewCommand(ctx, "update-server-info").
SetDescription(fmt.Sprintf("MigrateRepositoryGitData(git update-server-info): %s", repoPath)).
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
log.Error("MigrateRepositoryGitData(git update-server-info) in %v: Stdout: %s\nError: %v", repo, stdout, err)
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git update-server-info): %w", err)
}
gitRepo, err := git.OpenRepository(ctx, repoPath)
if err != nil {
return repo, fmt.Errorf("OpenRepository: %w", err)
}
defer gitRepo.Close()
repo.IsEmpty, err = gitRepo.IsEmpty()
if err != nil {
return repo, fmt.Errorf("git.IsEmpty: %w", err)
}
if !repo.IsEmpty {
if len(repo.DefaultBranch) == 0 {
// Try to get HEAD branch and set it as default branch.
headBranch, err := gitRepo.GetHEADBranch()
if err != nil {
return repo, fmt.Errorf("GetHEADBranch: %w", err)
}
if headBranch != nil {
repo.DefaultBranch = headBranch.Name
}
}
if _, err := SyncRepoBranchesWithRepo(ctx, repo, gitRepo, u.ID); err != nil {
return repo, fmt.Errorf("SyncRepoBranchesWithRepo: %v", err)
}
if !opts.Releases {
// note: this will greatly improve release (tag) sync
// for pull-mirrors with many tags
repo.IsMirror = opts.Mirror
if err = SyncReleasesWithTags(ctx, repo, gitRepo, true); err != nil {
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
}
if opts.LFS {
endpoint := lfs.DetermineEndpoint(opts.CloneAddr, opts.LFSEndpoint)
lfsClient := lfs.NewClient(endpoint, httpTransport)
if err = StoreMissingLfsObjectsInRepository(ctx, repo, gitRepo, lfsClient); err != nil {
log.Error("Failed to store missing LFS objects for repository: %v", err)
}
}
}
ctx, committer, err := db.TxContext(ctx)
if err != nil {
return nil, err
}
defer committer.Close()
if opts.Mirror {
remoteAddress, err := util.SanitizeURL(opts.CloneAddr)
if err != nil {
return repo, err
}
mirrorModel := repo_model.Mirror{
RepoID: repo.ID,
Interval: setting.Mirror.DefaultInterval,
SyncWiki: opts.Wiki,
SyncIssues: opts.Issues,
SyncMilestones: opts.Milestones,
SyncLabels: opts.Labels,
SyncReleases: opts.Releases,
SyncComments: opts.Comments,
SyncPullRequests: opts.PullRequests,
EnablePrune: true,
NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
LFS: opts.LFS,
RemoteAddress: remoteAddress,
}
if opts.LFS {
mirrorModel.LFSEndpoint = opts.LFSEndpoint
}
if opts.MirrorInterval != "" {
parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
if err != nil {
log.Error("Failed to set Interval: %v", err)
return repo, err
}
if parsedInterval == 0 {
mirrorModel.Interval = 0
mirrorModel.NextUpdateUnix = 0
} else if parsedInterval < setting.Mirror.MinInterval {
err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
log.Error("Interval: %s is too frequent", opts.MirrorInterval)
return repo, err
} else {
mirrorModel.Interval = parsedInterval
mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
}
}
if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
return repo, fmt.Errorf("InsertOne: %w", err)
}
repo.IsMirror = true
if err = UpdateRepository(ctx, repo, false); err != nil {
return nil, err
}
// this is necessary for sync local tags from remote
configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
if stdout, _, err := git.NewCommand(ctx, "config").
AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
RunStdString(&git.RunOpts{Dir: repoPath}); err != nil {
log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
}
} else {
if err = UpdateRepoSize(ctx, repo); err != nil {
log.Error("Failed to update size for repository: %v", err)
}
if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
return nil, err
}
}
return repo, committer.Commit()
}
// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials.
func cleanUpMigrateGitConfig(ctx context.Context, repoPath string) error {
cmd := git.NewCommand(ctx, "remote", "rm", "origin")
// if the origin does not exist
_, stderr, err := cmd.RunStdString(&git.RunOpts{
Dir: repoPath,
})
if err != nil && !strings.HasPrefix(stderr, "fatal: No such remote") {
return err
}
return nil
}
// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that don't need to be done for mirrors.
func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo_model.Repository, error) {
repoPath := repo.RepoPath()
if err := CreateDelegateHooks(repoPath); err != nil {
return repo, fmt.Errorf("createDelegateHooks: %w", err)
}
if repo.HasWiki() {
if err := CreateDelegateHooks(repo.WikiPath()); err != nil {
return repo, fmt.Errorf("createDelegateHooks.(wiki): %w", err)
}
}
_, _, err := git.NewCommand(ctx, "remote", "rm", "origin").RunStdString(&git.RunOpts{Dir: repoPath})
if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") {
return repo, fmt.Errorf("CleanUpMigrateInfo: %w", err)
}
if repo.HasWiki() {
if err := cleanUpMigrateGitConfig(ctx, repo.WikiPath()); err != nil {
return repo, fmt.Errorf("cleanUpMigrateGitConfig (wiki): %w", err)
}
}
return repo, UpdateRepository(ctx, repo, false)
}
// SyncRepoTags synchronizes releases table with repository tags
func SyncRepoTags(ctx context.Context, repoID int64) error {
repo, err := repo_model.GetRepositoryByID(ctx, repoID)
@ -55,17 +305,19 @@ func SyncRepoTags(ctx context.Context, repoID int64) error {
}
defer gitRepo.Close()
return SyncReleasesWithTags(ctx, repo, gitRepo)
return SyncReleasesWithTags(ctx, repo, gitRepo, false)
}
// SyncReleasesWithTags synchronizes release table with repository tags
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
// SyncReleasesWithTags synchronizes release table with repository tags for each of the releases.
//
// If tagOnlyReleases is true, then it is assumed all releases come from tags.
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagOnlyReleases bool) error {
log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
// optimized procedure for pull-mirrors which saves a lot of time (in
// particular for repos with many tags).
if repo.IsMirror {
return pullMirrorReleaseSync(ctx, repo, gitRepo)
if repo.IsMirror && tagOnlyReleases {
return recreateMirrorReleaseFromTags(ctx, repo, gitRepo)
}
existingRelTags := make(container.Set[string])
@ -280,14 +532,14 @@ func (shortRelease) TableName() string {
return "release"
}
// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// recreateMirrorReleaseFromTags is a pull-mirror specific tag<->release table
// synchronization which overwrites all Releases from the repository tags. This
// can be relied on since a pull-mirror is always identical to its
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
func recreateMirrorReleaseFromTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Trace("recreateMirrorReleaseFromTags: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
tags, numTags, err := gitRepo.GetTagInfos(0, 0)
if err != nil {
return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
@ -348,7 +600,7 @@ func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, git
return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
log.Trace("recreateMirrorReleaseFromTags: done rebuilding %d releases", numTags)
return nil
}

View File

@ -161,14 +161,6 @@ func Migrate(ctx *context.APIContext) {
GitServiceType: gitServiceType,
MirrorInterval: form.MirrorInterval,
}
if opts.Mirror {
opts.Issues = false
opts.Milestones = false
opts.Labels = false
opts.Comments = false
opts.PullRequests = false
opts.Releases = false
}
repo, err := repo_service.CreateRepositoryDirectly(ctx, ctx.Doer, repoOwner, repo_service.CreateRepoOptions{
Name: opts.RepoName,

View File

@ -223,14 +223,6 @@ func MigratePost(ctx *context.Context) {
PullRequests: form.PullRequests,
Releases: form.Releases,
}
if opts.Mirror {
opts.Issues = false
opts.Milestones = false
opts.Labels = false
opts.Comments = false
opts.PullRequests = false
opts.Releases = false
}
err = repo_model.CheckCreateRepository(ctx, ctx.Doer, ctxUser, opts.RepoName, false)
if err != nil {

View File

@ -714,6 +714,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
}
ctx.Data["CanCompareOrPull"] = canCompare
ctx.Data["PullRequestCtx"] = ctx.Repo.PullRequest
ctx.Data["AllowsIssues"] = repo.AllowsIssues(ctx)
if ctx.Repo.Repository.Status == repo_model.RepositoryPendingTransfer {
repoTransfer, err := models.GetPendingRepositoryTransfer(ctx, ctx.Repo.Repository)

View File

@ -32,6 +32,7 @@ var _ base.Uploader = &RepositoryDumper{}
// RepositoryDumper implements an Uploader to the local directory
type RepositoryDumper struct {
base.NullUploader
ctx context.Context
baseDir string
repoOwner string

View File

@ -42,6 +42,7 @@ var _ base.Uploader = &GiteaLocalUploader{}
// GiteaLocalUploader implements an Uploader to gitea sites
type GiteaLocalUploader struct {
base.NullUploader
ctx context.Context
doer *user_model.User
repoOwner string
@ -132,6 +133,12 @@ func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.Migrate
Private: repo.IsPrivate,
Wiki: opts.Wiki,
Releases: opts.Releases, // if didn't get releases, then sync them from tags
Issues: opts.Issues,
Milestones: opts.Milestones,
Labels: opts.Labels,
Comments: opts.Comments,
PullRequests: opts.PullRequests,
ReleaseAssets: opts.ReleaseAssets,
MirrorInterval: opts.MirrorInterval,
}, NewMigrationHTTPTransport())
@ -161,8 +168,7 @@ func (g *GiteaLocalUploader) Close() {
}
}
// CreateTopics creates topics
func (g *GiteaLocalUploader) CreateTopics(topics ...string) error {
func filterTopicsForDB(topics []string) []string {
// Ignore topics too long for the db
c := 0
for _, topic := range topics {
@ -174,11 +180,16 @@ func (g *GiteaLocalUploader) CreateTopics(topics ...string) error {
c++
}
topics = topics[:c]
return topics
}
// CreateTopics creates topics
func (g *GiteaLocalUploader) CreateTopics(topics ...string) error {
topics = filterTopicsForDB(topics)
return repo_model.SaveTopics(g.ctx, g.repo.ID, topics...)
}
// CreateMilestones creates milestones
func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) error {
func (g *GiteaLocalUploader) prepareMilestones(milestones ...*base.Milestone) []*issues_model.Milestone {
mss := make([]*issues_model.Milestone, 0, len(milestones))
for _, milestone := range milestones {
var deadline timeutil.TimeStamp
@ -210,13 +221,19 @@ func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) err
CreatedUnix: timeutil.TimeStamp(milestone.Created.Unix()),
UpdatedUnix: timeutil.TimeStamp(milestone.Updated.Unix()),
DeadlineUnix: deadline,
OriginalID: milestone.OriginalID,
}
if ms.IsClosed && milestone.Closed != nil {
ms.ClosedDateUnix = timeutil.TimeStamp(milestone.Closed.Unix())
}
mss = append(mss, &ms)
}
return mss
}
// CreateMilestones creates milestones
func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) error {
mss := g.prepareMilestones(milestones...)
err := issues_model.InsertMilestones(g.ctx, mss...)
if err != nil {
return err
@ -230,6 +247,17 @@ func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) err
// CreateLabels creates labels
func (g *GiteaLocalUploader) CreateLabels(labels ...*base.Label) error {
lbs := g.convertLabels(labels...)
if err := issues_model.NewLabels(g.ctx, lbs...); err != nil {
return err
}
for _, lb := range lbs {
g.labels[lb.Name] = lb
}
return nil
}
func (g *GiteaLocalUploader) convertLabels(labels ...*base.Label) []*issues_model.Label {
lbs := make([]*issues_model.Label, 0, len(labels))
for _, l := range labels {
if color, err := label.NormalizeColor(l.Color); err != nil {
@ -245,21 +273,13 @@ func (g *GiteaLocalUploader) CreateLabels(labels ...*base.Label) error {
Exclusive: l.Exclusive,
Description: l.Description,
Color: l.Color,
OriginalID: l.OriginalID,
})
}
err := issues_model.NewLabels(g.ctx, lbs...)
if err != nil {
return err
}
for _, lb := range lbs {
g.labels[lb.Name] = lb
}
return nil
return lbs
}
// CreateReleases creates releases
func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
func (g *GiteaLocalUploader) prepareReleases(releases ...*base.Release) ([]*repo_model.Release, error) {
rels := make([]*repo_model.Release, 0, len(releases))
for _, release := range releases {
if release.Created.IsZero() {
@ -294,7 +314,7 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
}
if err := g.remapUser(release, &rel); err != nil {
return err
return nil, err
}
// calc NumCommits if possible
@ -302,12 +322,12 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
commit, err := g.gitRepo.GetTagCommit(rel.TagName)
if !git.IsErrNotExist(err) {
if err != nil {
return fmt.Errorf("GetTagCommit[%v]: %w", rel.TagName, err)
return nil, fmt.Errorf("GetTagCommit[%v]: %w", rel.TagName, err)
}
rel.Sha1 = commit.ID.String()
rel.NumCommits, err = commit.CommitsCount()
if err != nil {
return fmt.Errorf("CommitsCount: %w", err)
return nil, fmt.Errorf("CommitsCount: %w", err)
}
}
}
@ -353,7 +373,7 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
return err
}()
if err != nil {
return err
return nil, err
}
rel.Attachments = append(rel.Attachments, &attach)
@ -361,17 +381,25 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
rels = append(rels, &rel)
}
return rels, nil
}
// CreateReleases creates releases
func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
rels, err := g.prepareReleases(releases...)
if err != nil {
return err
}
return repo_model.InsertReleases(g.ctx, rels...)
}
// SyncTags syncs releases with tags in the database
// SyncTags syncs releases with tags in the databases
func (g *GiteaLocalUploader) SyncTags() error {
return repo_module.SyncReleasesWithTags(g.ctx, g.repo, g.gitRepo)
return repo_module.SyncReleasesWithTags(g.ctx, g.repo, g.gitRepo, false)
}
// CreateIssues creates issues
func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error {
func (g *GiteaLocalUploader) prepareIssues(issues ...*base.Issue) ([]*issues_model.Issue, error) {
iss := make([]*issues_model.Issue, 0, len(issues))
for _, issue := range issues {
var labels []*issues_model.Label
@ -421,7 +449,7 @@ func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error {
}
if err := g.remapUser(issue, &is); err != nil {
return err
return nil, err
}
if issue.Closed != nil {
@ -434,34 +462,45 @@ func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error {
CreatedUnix: timeutil.TimeStampNow(),
}
if err := g.remapUser(reaction, &res); err != nil {
return err
return nil, err
}
is.Reactions = append(is.Reactions, &res)
}
iss = append(iss, &is)
}
return iss, nil
}
if len(iss) > 0 {
if err := issues_model.InsertIssues(g.ctx, iss...); err != nil {
return err
}
for _, is := range iss {
g.issues[is.Index] = is
}
// CreateIssues creates issues
func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error {
iss, err := g.prepareIssues(issues...)
if err != nil {
return err
}
if len(iss) == 0 {
return nil
}
if err := issues_model.InsertIssues(g.ctx, iss...); err != nil {
return err
}
for _, is := range iss {
g.issues[is.Index] = is
}
return nil
}
// CreateComments creates comments of issues
func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
func (g *GiteaLocalUploader) prepareComments(comments ...*base.Comment) ([]*issues_model.Comment, error) {
cms := make([]*issues_model.Comment, 0, len(comments))
for _, comment := range comments {
var issue *issues_model.Issue
issue, ok := g.issues[comment.IssueIndex]
if !ok {
return fmt.Errorf("comment references non existent IssueIndex %d", comment.IssueIndex)
// ignore comments for non existent issues
// It can happen when a comment belongs to a pull request, but the pull request is not imported
log.Warn("Ignoring comment for non existent issue %d", comment.IssueIndex)
continue
}
if comment.Created.IsZero() {
@ -480,6 +519,7 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
Content: comment.Content,
CreatedUnix: timeutil.TimeStamp(comment.Created.Unix()),
UpdatedUnix: timeutil.TimeStamp(comment.Updated.Unix()),
OriginalID: comment.OriginalID,
}
switch cm.Type {
@ -515,7 +555,7 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
}
if err := g.remapUser(comment, &cm); err != nil {
return err
return nil, err
}
// add reactions
@ -525,13 +565,22 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
CreatedUnix: timeutil.TimeStampNow(),
}
if err := g.remapUser(reaction, &res); err != nil {
return err
return nil, err
}
cm.Reactions = append(cm.Reactions, &res)
}
cms = append(cms, &cm)
}
return cms, nil
}
// CreateComments creates comments of issues
func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
cms, err := g.prepareComments(comments...)
if err != nil {
return err
}
if len(cms) == 0 {
return nil
@ -539,21 +588,29 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error {
return issues_model.InsertIssueComments(g.ctx, cms)
}
// CreatePullRequests creates pull requests
func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error {
func (g *GiteaLocalUploader) preparePullRequests(prs ...*base.PullRequest) ([]*issues_model.PullRequest, error) {
gprs := make([]*issues_model.PullRequest, 0, len(prs))
for _, pr := range prs {
gpr, err := g.newPullRequest(pr)
gpr, err := g.getPullRequest(pr)
if err != nil {
return err
return nil, err
}
if err := g.remapUser(pr, gpr.Issue); err != nil {
return err
return nil, err
}
gprs = append(gprs, gpr)
}
return gprs, nil
}
// CreatePullRequests creates pull requests
func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error {
gprs, err := g.preparePullRequests(prs...)
if err != nil {
return err
}
if err := issues_model.InsertPullRequests(g.ctx, gprs...); err != nil {
return err
}
@ -717,7 +774,7 @@ func (g *GiteaLocalUploader) updateGitForPullRequest(pr *base.PullRequest) (head
return head, nil
}
func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*issues_model.PullRequest, error) {
func (g *GiteaLocalUploader) getPullRequest(pr *base.PullRequest) (*issues_model.PullRequest, error) {
var labels []*issues_model.Label
for _, label := range pr.Labels {
lb, ok := g.labels[label.Name]
@ -834,14 +891,13 @@ func convertReviewState(state string) issues_model.ReviewType {
}
}
// CreateReviews create pull request reviews of currently migrated issues
func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
func (g *GiteaLocalUploader) prepareReviews(reviews ...*base.Review) ([]*issues_model.Review, error) {
cms := make([]*issues_model.Review, 0, len(reviews))
for _, review := range reviews {
var issue *issues_model.Issue
issue, ok := g.issues[review.IssueIndex]
if !ok {
return fmt.Errorf("review references non existent IssueIndex %d", review.IssueIndex)
return nil, fmt.Errorf("review references non existent IssueIndex %d", review.IssueIndex)
}
if review.CreatedAt.IsZero() {
review.CreatedAt = time.Unix(int64(issue.CreatedUnix), 0)
@ -854,10 +910,11 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
Official: review.Official,
CreatedUnix: timeutil.TimeStamp(review.CreatedAt.Unix()),
UpdatedUnix: timeutil.TimeStamp(review.CreatedAt.Unix()),
OriginalID: review.OriginalID,
}
if err := g.remapUser(review, &cm); err != nil {
return err
return nil, err
}
cms = append(cms, &cm)
@ -868,12 +925,12 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
var err error
pr, err = issues_model.GetPullRequestByIssueIDWithNoAttributes(g.ctx, issue.ID)
if err != nil {
return err
return nil, err
}
g.prCache[issue.ID] = pr
}
if pr.MergeBase == "" {
// No mergebase -> no basis for any patches
// No merge base -> no basis for any patches
log.Warn("PR #%d in %s/%s: does not have a merge base, all review comments will be ignored", pr.Index, g.repoOwner, g.repoName)
continue
}
@ -937,16 +994,121 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
}
if err := g.remapUser(review, &c); err != nil {
return err
return nil, err
}
cm.Comments = append(cm.Comments, &c)
}
}
return cms, nil
}
// CreateReviews create pull request reviews of currently migrated issues
func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
cms, err := g.prepareReviews(reviews...)
if err != nil {
return err
}
return issues_model.InsertReviews(g.ctx, cms)
}
// UpdateTopics updates topics
func (g *GiteaLocalUploader) UpdateTopics(topics ...string) error {
topics = filterTopicsForDB(topics)
return repo_model.SaveTopics(g.ctx, g.repo.ID, topics...)
}
func (g *GiteaLocalUploader) UpdateMilestones(milestones ...*base.Milestone) error {
mss := g.prepareMilestones(milestones...)
err := issues_model.UpdateMilestones(g.ctx, mss...)
if err != nil {
return err
}
for _, ms := range mss {
g.milestones[ms.Name] = ms.ID
}
return nil
}
func (g *GiteaLocalUploader) UpdateLabels(labels ...*base.Label) error {
lbs := g.convertLabels(labels...)
if err := issues_model.UpdateLabelsByRepoID(g.ctx, g.repo.ID, lbs...); err != nil {
return err
}
for _, lb := range lbs {
g.labels[lb.Name] = lb
}
return nil
}
func (g *GiteaLocalUploader) PatchReleases(releases ...*base.Release) error {
// TODO: needs performance improvement
rels, err := g.prepareReleases(releases...)
if err != nil {
return err
}
return repo_model.UpsertReleases(g.ctx, rels...)
}
func (g *GiteaLocalUploader) PatchIssues(issues ...*base.Issue) error {
iss, err := g.prepareIssues(issues...)
if err != nil {
return err
}
if len(iss) == 0 {
return nil
}
if err := issues_model.UpsertIssues(g.ctx, iss...); err != nil {
return err
}
for _, is := range iss {
g.issues[is.Index] = is
}
return nil
}
func (g *GiteaLocalUploader) PatchComments(comments ...*base.Comment) error {
cms, err := g.prepareComments(comments...)
if err != nil {
return err
}
if len(cms) == 0 {
return nil
}
return issues_model.UpsertIssueComments(g.ctx, cms)
}
func (g *GiteaLocalUploader) PatchPullRequests(prs ...*base.PullRequest) error {
gprs, err := g.preparePullRequests(prs...)
if err != nil {
return err
}
if err := issues_model.UpsertPullRequests(g.ctx, gprs...); err != nil {
return err
}
for _, pr := range gprs {
g.issues[pr.Issue.Index] = pr.Issue
pull.AddToTaskQueue(g.ctx, pr)
}
return nil
}
func (g *GiteaLocalUploader) PatchReviews(reviews ...*base.Review) error {
cms, err := g.prepareReviews(reviews...)
if err != nil {
return err
}
return issues_model.UpsertReviews(g.ctx, cms)
}
// Rollback when migrating failed, this will rollback all the changes.
func (g *GiteaLocalUploader) Rollback() error {
if g.repo != nil && g.repo.ID > 0 {

View File

@ -31,8 +31,10 @@ import (
)
func TestGiteaUploadRepo(t *testing.T) {
// FIXME: Since no accesskey or user/password will trigger rate limit of github, just skip
t.Skip()
token := os.Getenv("GITHUB_READ_TOKEN")
if token == "" {
t.Skip("Skipping GitHub migration test because GITHUB_READ_TOKEN is empty")
}
unittest.PrepareTestEnv(t)
@ -40,7 +42,7 @@ func TestGiteaUploadRepo(t *testing.T) {
var (
ctx = context.Background()
downloader = NewGithubDownloaderV3(ctx, "https://github.com", "", "", "", "go-xorm", "builder")
downloader = NewGithubDownloaderV3(ctx, "https://github.com", "", "", token, "go-xorm", "builder")
repoName = "builder-" + time.Now().Format("2006-01-02-15-04-05")
uploader = NewGiteaLocalUploader(graceful.GetManager().HammerContext(), user, user.Name, repoName)
)

View File

@ -120,6 +120,10 @@ func NewGithubDownloaderV3(ctx context.Context, baseURL, userName, password, tok
return &downloader
}
func (g *GithubDownloaderV3) SupportSyncing() bool {
return true
}
// String implements Stringer
func (g *GithubDownloaderV3) String() string {
return fmt.Sprintf("migration from github server %s %s/%s", g.baseURL, g.repoOwner, g.repoName)
@ -261,6 +265,7 @@ func (g *GithubDownloaderV3) GetMilestones() ([]*base.Milestone, error) {
Created: m.GetCreatedAt().Time,
Updated: m.UpdatedAt.GetTime(),
Closed: m.ClosedAt.GetTime(),
OriginalID: m.GetID(),
})
}
if len(ms) < perPage {
@ -275,6 +280,7 @@ func convertGithubLabel(label *github.Label) *base.Label {
Name: label.GetName(),
Color: label.GetColor(),
Description: label.GetDescription(),
OriginalID: label.GetID(),
}
}
@ -412,89 +418,7 @@ func (g *GithubDownloaderV3) GetReleases() ([]*base.Release, error) {
// GetIssues returns issues according start and limit
func (g *GithubDownloaderV3) GetIssues(page, perPage int) ([]*base.Issue, bool, error) {
if perPage > g.maxPerPage {
perPage = g.maxPerPage
}
opt := &github.IssueListByRepoOptions{
Sort: "created",
Direction: "asc",
State: "all",
ListOptions: github.ListOptions{
PerPage: perPage,
Page: page,
},
}
allIssues := make([]*base.Issue, 0, perPage)
g.waitAndPickClient()
issues, resp, err := g.getClient().Issues.ListByRepo(g.ctx, g.repoOwner, g.repoName, opt)
if err != nil {
return nil, false, fmt.Errorf("error while listing repos: %w", err)
}
log.Trace("Request get issues %d/%d, but in fact get %d", perPage, page, len(issues))
g.setRate(&resp.Rate)
for _, issue := range issues {
if issue.IsPullRequest() {
continue
}
labels := make([]*base.Label, 0, len(issue.Labels))
for _, l := range issue.Labels {
labels = append(labels, convertGithubLabel(l))
}
// get reactions
var reactions []*base.Reaction
if !g.SkipReactions {
for i := 1; ; i++ {
g.waitAndPickClient()
res, resp, err := g.getClient().Reactions.ListIssueReactions(g.ctx, g.repoOwner, g.repoName, issue.GetNumber(), &github.ListOptions{
Page: i,
PerPage: perPage,
})
if err != nil {
return nil, false, err
}
g.setRate(&resp.Rate)
if len(res) == 0 {
break
}
for _, reaction := range res {
reactions = append(reactions, &base.Reaction{
UserID: reaction.User.GetID(),
UserName: reaction.User.GetLogin(),
Content: reaction.GetContent(),
})
}
}
}
var assignees []string
for i := range issue.Assignees {
assignees = append(assignees, issue.Assignees[i].GetLogin())
}
allIssues = append(allIssues, &base.Issue{
Title: *issue.Title,
Number: int64(*issue.Number),
PosterID: issue.GetUser().GetID(),
PosterName: issue.GetUser().GetLogin(),
PosterEmail: issue.GetUser().GetEmail(),
Content: issue.GetBody(),
Milestone: issue.GetMilestone().GetTitle(),
State: issue.GetState(),
Created: issue.GetCreatedAt().Time,
Updated: issue.GetUpdatedAt().Time,
Labels: labels,
Reactions: reactions,
Closed: issue.ClosedAt.GetTime(),
IsLocked: issue.GetLocked(),
Assignees: assignees,
ForeignIndex: int64(*issue.Number),
})
}
return allIssues, len(issues) < perPage, nil
return g.getIssuesSince(page, perPage, time.Time{}) // set since to empty to get all issues
}
// SupportGetRepoComments return true if it supports get repo comments
@ -504,11 +428,11 @@ func (g *GithubDownloaderV3) SupportGetRepoComments() bool {
// GetComments returns comments according issueNumber
func (g *GithubDownloaderV3) GetComments(commentable base.Commentable) ([]*base.Comment, bool, error) {
comments, err := g.getComments(commentable)
comments, err := g.getCommentsSince(commentable, nil)
return comments, false, err
}
func (g *GithubDownloaderV3) getComments(commentable base.Commentable) ([]*base.Comment, error) {
func (g *GithubDownloaderV3) getCommentsSince(commentable base.Commentable, since *time.Time) ([]*base.Comment, error) {
var (
allComments = make([]*base.Comment, 0, g.maxPerPage)
created = "created"
@ -517,6 +441,7 @@ func (g *GithubDownloaderV3) getComments(commentable base.Commentable) ([]*base.
opt := &github.IssueListCommentsOptions{
Sort: &created,
Direction: &asc,
Since: since,
ListOptions: github.ListOptions{
PerPage: g.maxPerPage,
},
@ -565,6 +490,7 @@ func (g *GithubDownloaderV3) getComments(commentable base.Commentable) ([]*base.
Created: comment.GetCreatedAt().Time,
Updated: comment.GetUpdatedAt().Time,
Reactions: reactions,
OriginalID: comment.GetID(),
})
}
if resp.NextPage == 0 {
@ -577,6 +503,12 @@ func (g *GithubDownloaderV3) getComments(commentable base.Commentable) ([]*base.
// GetAllComments returns repository comments according page and perPageSize
func (g *GithubDownloaderV3) GetAllComments(page, perPage int) ([]*base.Comment, bool, error) {
return g.getAllCommentsSince(page, perPage, nil)
}
// GetAllCommentsSince returns repository comments since a time.
// If since is nil, it will return all comments.
func (g *GithubDownloaderV3) getAllCommentsSince(page, perPage int, since *time.Time) ([]*base.Comment, bool, error) {
var (
allComments = make([]*base.Comment, 0, perPage)
created = "created"
@ -588,6 +520,7 @@ func (g *GithubDownloaderV3) GetAllComments(page, perPage int) ([]*base.Comment,
opt := &github.IssueListCommentsOptions{
Sort: &created,
Direction: &asc,
Since: since,
ListOptions: github.ListOptions{
Page: page,
PerPage: perPage,
@ -641,6 +574,7 @@ func (g *GithubDownloaderV3) GetAllComments(page, perPage int) ([]*base.Comment,
Created: comment.GetCreatedAt().Time,
Updated: comment.GetUpdatedAt().Time,
Reactions: reactions,
OriginalID: comment.GetID(),
})
}
@ -649,104 +583,11 @@ func (g *GithubDownloaderV3) GetAllComments(page, perPage int) ([]*base.Comment,
// GetPullRequests returns pull requests according page and perPage
func (g *GithubDownloaderV3) GetPullRequests(page, perPage int) ([]*base.PullRequest, bool, error) {
if perPage > g.maxPerPage {
perPage = g.maxPerPage
}
opt := &github.PullRequestListOptions{
Sort: "created",
Direction: "asc",
State: "all",
ListOptions: github.ListOptions{
PerPage: perPage,
Page: page,
},
}
allPRs := make([]*base.PullRequest, 0, perPage)
g.waitAndPickClient()
prs, resp, err := g.getClient().PullRequests.List(g.ctx, g.repoOwner, g.repoName, opt)
if err != nil {
return nil, false, fmt.Errorf("error while listing repos: %w", err)
}
log.Trace("Request get pull requests %d/%d, but in fact get %d", perPage, page, len(prs))
g.setRate(&resp.Rate)
for _, pr := range prs {
labels := make([]*base.Label, 0, len(pr.Labels))
for _, l := range pr.Labels {
labels = append(labels, convertGithubLabel(l))
}
// get reactions
var reactions []*base.Reaction
if !g.SkipReactions {
for i := 1; ; i++ {
g.waitAndPickClient()
res, resp, err := g.getClient().Reactions.ListIssueReactions(g.ctx, g.repoOwner, g.repoName, pr.GetNumber(), &github.ListOptions{
Page: i,
PerPage: perPage,
})
if err != nil {
return nil, false, err
}
g.setRate(&resp.Rate)
if len(res) == 0 {
break
}
for _, reaction := range res {
reactions = append(reactions, &base.Reaction{
UserID: reaction.User.GetID(),
UserName: reaction.User.GetLogin(),
Content: reaction.GetContent(),
})
}
}
}
// download patch and saved as tmp file
g.waitAndPickClient()
allPRs = append(allPRs, &base.PullRequest{
Title: pr.GetTitle(),
Number: int64(pr.GetNumber()),
PosterID: pr.GetUser().GetID(),
PosterName: pr.GetUser().GetLogin(),
PosterEmail: pr.GetUser().GetEmail(),
Content: pr.GetBody(),
Milestone: pr.GetMilestone().GetTitle(),
State: pr.GetState(),
Created: pr.GetCreatedAt().Time,
Updated: pr.GetUpdatedAt().Time,
Closed: pr.ClosedAt.GetTime(),
Labels: labels,
Merged: pr.MergedAt != nil,
MergeCommitSHA: pr.GetMergeCommitSHA(),
MergedTime: pr.MergedAt.GetTime(),
IsLocked: pr.ActiveLockReason != nil,
Head: base.PullRequestBranch{
Ref: pr.GetHead().GetRef(),
SHA: pr.GetHead().GetSHA(),
OwnerName: pr.GetHead().GetUser().GetLogin(),
RepoName: pr.GetHead().GetRepo().GetName(),
CloneURL: pr.GetHead().GetRepo().GetCloneURL(), // see below for SECURITY related issues here
},
Base: base.PullRequestBranch{
Ref: pr.GetBase().GetRef(),
SHA: pr.GetBase().GetSHA(),
RepoName: pr.GetBase().GetRepo().GetName(),
OwnerName: pr.GetBase().GetUser().GetLogin(),
},
PatchURL: pr.GetPatchURL(), // see below for SECURITY related issues here
Reactions: reactions,
ForeignIndex: int64(*pr.Number),
})
// SECURITY: Ensure that the PR is safe
_ = CheckAndEnsureSafePR(allPRs[len(allPRs)-1], g.baseURL, g)
}
return allPRs, len(prs) < perPage, nil
return g.GetNewPullRequests(page, perPage, time.Time{})
}
func convertGithubReview(r *github.PullRequestReview) *base.Review {
// convertGithubReview converts github review to Gitea review
func (g *GithubDownloaderV3) convertGithubReview(r *github.PullRequestReview) *base.Review {
return &base.Review{
ID: r.GetID(),
ReviewerID: r.GetUser().GetID(),
@ -755,6 +596,7 @@ func convertGithubReview(r *github.PullRequestReview) *base.Review {
Content: r.GetBody(),
CreatedAt: r.GetSubmittedAt().Time,
State: r.GetState(),
OriginalID: r.GetID(),
}
}
@ -788,17 +630,18 @@ func (g *GithubDownloaderV3) convertGithubReviewComments(cs []*github.PullReques
}
rcs = append(rcs, &base.ReviewComment{
ID: c.GetID(),
InReplyTo: c.GetInReplyTo(),
Content: c.GetBody(),
TreePath: c.GetPath(),
DiffHunk: c.GetDiffHunk(),
Position: c.GetPosition(),
CommitID: c.GetCommitID(),
PosterID: c.GetUser().GetID(),
Reactions: reactions,
CreatedAt: c.GetCreatedAt().Time,
UpdatedAt: c.GetUpdatedAt().Time,
ID: c.GetID(),
InReplyTo: c.GetInReplyTo(),
Content: c.GetBody(),
TreePath: c.GetPath(),
DiffHunk: c.GetDiffHunk(),
Position: c.GetPosition(),
CommitID: c.GetCommitID(),
PosterID: c.GetUser().GetID(),
Reactions: reactions,
CreatedAt: c.GetCreatedAt().Time,
UpdatedAt: c.GetUpdatedAt().Time,
OriginalID: c.GetID(),
})
}
return rcs, nil
@ -822,7 +665,7 @@ func (g *GithubDownloaderV3) GetReviews(reviewable base.Reviewable) ([]*base.Rev
}
g.setRate(&resp.Rate)
for _, review := range reviews {
r := convertGithubReview(review)
r := g.convertGithubReview(review)
r.IssueIndex = reviewable.GetLocalIndex()
// retrieve all review comments
opt2 := &github.ListOptions{
@ -878,3 +721,230 @@ func (g *GithubDownloaderV3) GetReviews(reviewable base.Reviewable) ([]*base.Rev
}
return allReviews, nil
}
// GetNewIssues returns new issues updated after the given time according start and limit
func (g *GithubDownloaderV3) GetNewIssues(page, perPage int, updatedAfter time.Time) ([]*base.Issue, bool, error) {
return g.getIssuesSince(page, perPage, updatedAfter)
}
// getIssuesSince returns issues given page, perPage and since.
// when since is empty, it will return all issues
func (g *GithubDownloaderV3) getIssuesSince(page, perPage int, since time.Time) ([]*base.Issue, bool, error) {
if perPage > g.maxPerPage {
perPage = g.maxPerPage
}
opt := &github.IssueListByRepoOptions{
Sort: "created",
Direction: "asc",
State: "all",
Since: since,
ListOptions: github.ListOptions{
PerPage: perPage,
Page: page,
},
}
allIssues := make([]*base.Issue, 0, perPage)
g.waitAndPickClient()
issues, resp, err := g.getClient().Issues.ListByRepo(g.ctx, g.repoOwner, g.repoName, opt)
if err != nil {
return nil, false, fmt.Errorf("error while listing repos: %w", err)
}
log.Trace("Request get issues %d/%d, but in fact get %d", perPage, page, len(issues))
g.setRate(&resp.Rate)
for _, issue := range issues {
if issue.IsPullRequest() {
continue
}
labels := make([]*base.Label, 0, len(issue.Labels))
for _, l := range issue.Labels {
labels = append(labels, convertGithubLabel(l))
}
// get reactions
reactions, err := g.getIssueReactions(issue.GetNumber(), perPage)
if err != nil {
return nil, false, err
}
var assignees []string
for i := range issue.Assignees {
assignees = append(assignees, issue.Assignees[i].GetLogin())
}
allIssues = append(allIssues, &base.Issue{
Title: *issue.Title,
Number: int64(*issue.Number),
PosterID: issue.GetUser().GetID(),
PosterName: issue.GetUser().GetLogin(),
PosterEmail: issue.GetUser().GetEmail(),
Content: issue.GetBody(),
Milestone: issue.GetMilestone().GetTitle(),
State: issue.GetState(),
Created: issue.GetCreatedAt().Time,
Updated: issue.GetUpdatedAt().Time,
Labels: labels,
Reactions: reactions,
Closed: issue.ClosedAt.GetTime(),
IsLocked: issue.GetLocked(),
Assignees: assignees,
ForeignIndex: int64(*issue.Number),
})
}
return allIssues, len(issues) < perPage, nil
}
// GetNewComments returns comments of an issue or PR after the given time
func (g GithubDownloaderV3) GetNewComments(commentable base.Commentable, updatedAfter time.Time) ([]*base.Comment, bool, error) {
comments, err := g.getCommentsSince(commentable, &updatedAfter)
return comments, false, err
}
// GetAllNewComments returns paginated comments after the given time
func (g GithubDownloaderV3) GetAllNewComments(page, perPage int, updatedAfter time.Time) ([]*base.Comment, bool, error) {
return g.getAllCommentsSince(page, perPage, &updatedAfter)
}
// GetNewPullRequests returns pull requests after the given time according page and perPage
// If `updatedAfter` is zero-valued, it will return all pull requests
func (g *GithubDownloaderV3) GetNewPullRequests(page, perPage int, updatedAfter time.Time) ([]*base.PullRequest, bool, error) {
// Pulls API doesn't have parameter `since`, so we have to use Search API instead.
// By specifying `repo:owner/repo is:pr` in the query, we can get all pull requests of the repository.
// In addition, we can specify `updated:>=YYYY-MM-DDTHH:MM:SS+00:00` to get pull requests updated after the given time.
if perPage > g.maxPerPage {
perPage = g.maxPerPage
}
opt := &github.SearchOptions{
Sort: "created",
Order: "asc",
ListOptions: github.ListOptions{
PerPage: perPage,
Page: page,
},
}
allPRs := make([]*base.PullRequest, 0, perPage)
g.waitAndPickClient()
searchQuery := fmt.Sprintf("repo:%s/%s is:pr", g.repoOwner, g.repoName)
if !updatedAfter.IsZero() {
// GitHub requires time to be later than 1970-01-01, so we should skip `updated` part if it's zero.
// Timezone is denoted by plus/minus UTC offset, rather than 'Z',
// according to https://docs.github.com/en/search-github/searching-on-github/searching-issues-and-pull-requests#search-by-when-an-issue-or-pull-request-was-created-or-last-updated
timeStr := updatedAfter.Format("2006-01-02T15:04:05-07:00")
searchQuery += fmt.Sprintf(" updated:>=%s", timeStr)
}
result, resp, err := g.getClient().Search.Issues(g.ctx, searchQuery, opt)
if err != nil {
return nil, false, fmt.Errorf("error while listing repos: %v", err)
}
log.Trace("Request get issues %d/%d, but in fact get %d", perPage, page, len(result.Issues))
g.setRate(&resp.Rate)
for _, issue := range result.Issues {
pr, resp, err := g.getClient().PullRequests.Get(g.ctx, g.repoOwner, g.repoName, issue.GetNumber())
if err != nil {
return nil, false, fmt.Errorf("error while getting repo pull request: %v", err)
}
g.setRate(&resp.Rate)
basePR, err := g.convertGithubPullRequest(pr, perPage)
if err != nil {
return nil, false, err
}
allPRs = append(allPRs, basePR)
// SECURITY: Ensure that the PR is safe
_ = CheckAndEnsureSafePR(allPRs[len(allPRs)-1], g.baseURL, g)
}
return allPRs, len(result.Issues) < perPage, nil
}
// GetNewReviews returns new pull requests review after the given time
func (g GithubDownloaderV3) GetNewReviews(reviewable base.Reviewable, updatedAfter time.Time) ([]*base.Review, error) {
// Github does not support since parameter for reviews, so we need to get all reviews
return g.GetReviews(reviewable)
}
func (g *GithubDownloaderV3) convertGithubPullRequest(pr *github.PullRequest, perPage int) (*base.PullRequest, error) {
labels := make([]*base.Label, 0, len(pr.Labels))
for _, l := range pr.Labels {
labels = append(labels, convertGithubLabel(l))
}
// get reactions
reactions, err := g.getIssueReactions(pr.GetNumber(), perPage)
if err != nil {
return nil, err
}
// download patch and saved as tmp file
g.waitAndPickClient()
return &base.PullRequest{
Title: pr.GetTitle(),
Number: int64(pr.GetNumber()),
PosterID: pr.GetUser().GetID(),
PosterName: pr.GetUser().GetLogin(),
PosterEmail: pr.GetUser().GetEmail(),
Content: pr.GetBody(),
Milestone: pr.GetMilestone().GetTitle(),
State: pr.GetState(),
Created: pr.GetCreatedAt().Time,
Updated: pr.GetUpdatedAt().Time,
Closed: pr.ClosedAt.GetTime(),
Labels: labels,
Merged: pr.MergedAt != nil,
MergeCommitSHA: pr.GetMergeCommitSHA(),
MergedTime: pr.MergedAt.GetTime(),
IsLocked: pr.ActiveLockReason != nil,
Head: base.PullRequestBranch{
Ref: pr.GetHead().GetRef(),
SHA: pr.GetHead().GetSHA(),
OwnerName: pr.GetHead().GetUser().GetLogin(),
RepoName: pr.GetHead().GetRepo().GetName(),
CloneURL: pr.GetHead().GetRepo().GetCloneURL(),
},
Base: base.PullRequestBranch{
Ref: pr.GetBase().GetRef(),
SHA: pr.GetBase().GetSHA(),
RepoName: pr.GetBase().GetRepo().GetName(),
OwnerName: pr.GetBase().GetUser().GetLogin(),
},
PatchURL: pr.GetPatchURL(),
Reactions: reactions,
ForeignIndex: int64(*pr.Number),
}, nil
}
// getIssueReactions returns reactions using Github API
func (g *GithubDownloaderV3) getIssueReactions(number, perPage int) ([]*base.Reaction, error) {
var reactions []*base.Reaction
if !g.SkipReactions {
for i := 1; ; i++ {
g.waitAndPickClient()
res, resp, err := g.getClient().Reactions.ListIssueReactions(g.ctx, g.repoOwner, g.repoName, number, &github.ListOptions{
Page: i,
PerPage: perPage,
})
if err != nil {
return nil, err
}
g.setRate(&resp.Rate)
if len(res) == 0 {
break
}
for _, reaction := range res {
reactions = append(reactions, &base.Reaction{
UserID: reaction.User.GetID(),
UserName: reaction.User.GetLogin(),
Content: reaction.GetContent(),
})
}
}
}
return reactions, nil
}

View File

@ -11,11 +11,13 @@ import (
"net/url"
"path/filepath"
"strings"
"time"
"code.gitea.io/gitea/models"
repo_model "code.gitea.io/gitea/models/repo"
system_model "code.gitea.io/gitea/models/system"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/hostmatcher"
"code.gitea.io/gitea/modules/log"
base "code.gitea.io/gitea/modules/migration"
@ -481,6 +483,304 @@ func migrateRepository(ctx context.Context, doer *user_model.User, downloader ba
return uploader.Finish()
}
// SyncRepository syncs a repository according MigrateOptions
func SyncRepository(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, opts base.MigrateOptions, messenger base.Messenger, lastSynced time.Time) (*repo_model.Repository, error) {
ownerName := repo.OwnerName
downloader, err := newDownloader(ctx, ownerName, opts)
if err != nil {
return nil, err
}
if !downloader.SupportSyncing() {
log.Info("repository syncing is not supported, ignored")
return nil, nil
}
uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
uploader.gitServiceType = opts.GitServiceType
uploader.repo = repo
uploader.gitRepo, err = git.OpenRepository(ctx, repo.RepoPath())
if err != nil {
log.Error("open repository failed: %v", err)
return nil, err
}
if err := syncRepository(downloader, uploader, opts, messenger, lastSynced); err != nil {
// It's different from migration that we shouldn't rollback here,
// because the only thing rollback does is to delete the repository
if err := system_model.CreateRepositoryNotice(fmt.Sprintf("Syncing repository from %s failed: %v", opts.OriginalURL, err)); err != nil {
log.Error("create repository notice failed: ", err)
}
return nil, err
}
return uploader.repo, nil
}
// syncRepository will download new information and then upload it to Uploader.
func syncRepository(downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions, messenger base.Messenger, lastSynced time.Time) error {
if messenger == nil {
messenger = base.NilMessenger
}
log.Trace("syncing topics")
messenger("repo.migrate.syncing_topics")
topics, err := downloader.GetTopics()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing topics is not supported, ignored")
}
if len(topics) != 0 {
if err = uploader.UpdateTopics(topics...); err != nil {
return err
}
}
if opts.Milestones {
log.Trace("syncing milestones")
messenger("repo.migrate.syncing_milestones")
milestones, err := downloader.GetMilestones()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing milestones is not supported, ignored")
}
msBatchSize := uploader.MaxBatchInsertSize("milestone")
for len(milestones) > 0 {
if len(milestones) < msBatchSize {
msBatchSize = len(milestones)
}
if err := uploader.UpdateMilestones(milestones...); err != nil {
return err
}
milestones = milestones[msBatchSize:]
}
}
if opts.Labels {
log.Trace("syncing labels")
messenger("repo.migrate.syncing_labels")
labels, err := downloader.GetLabels()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing labels is not supported, ignored")
}
lbBatchSize := uploader.MaxBatchInsertSize("label")
for len(labels) > 0 {
if len(labels) < lbBatchSize {
lbBatchSize = len(labels)
}
if err := uploader.UpdateLabels(labels...); err != nil {
return err
}
labels = labels[lbBatchSize:]
}
}
if opts.Releases {
log.Trace("syncing releases")
messenger("repo.migrate.syncing_releases")
releases, err := downloader.GetReleases()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing releases is not supported, ignored")
}
relBatchSize := uploader.MaxBatchInsertSize("release")
for len(releases) > 0 {
if len(releases) < relBatchSize {
relBatchSize = len(releases)
}
if err = uploader.PatchReleases(releases[:relBatchSize]...); err != nil {
return err
}
releases = releases[relBatchSize:]
}
// Once all releases (if any) are inserted, sync any remaining non-release tags
if err = uploader.SyncTags(); err != nil {
return err
}
}
var (
commentBatchSize = uploader.MaxBatchInsertSize("comment")
reviewBatchSize = uploader.MaxBatchInsertSize("review")
)
supportAllComments := downloader.SupportGetRepoComments()
if opts.Issues {
log.Trace("syncing issues and comments")
messenger("repo.migrate.syncing_issues")
issueBatchSize := uploader.MaxBatchInsertSize("issue")
for i := 1; ; i++ {
issues, isEnd, err := downloader.GetNewIssues(i, issueBatchSize, lastSynced)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing issues is not supported, ignored")
break
}
if err := uploader.PatchIssues(issues...); err != nil {
return err
}
if opts.Comments && !supportAllComments {
allComments := make([]*base.Comment, 0, commentBatchSize)
for _, issue := range issues {
log.Trace("syncing issue %d's comments", issue.Number)
comments, _, err := downloader.GetNewComments(issue, lastSynced)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing comments is not supported, ignored")
}
allComments = append(allComments, comments...)
if len(allComments) >= commentBatchSize {
if err = uploader.PatchComments(allComments[:commentBatchSize]...); err != nil {
return err
}
allComments = allComments[commentBatchSize:]
}
}
if len(allComments) > 0 {
if err = uploader.PatchComments(allComments...); err != nil {
return err
}
}
}
if isEnd {
break
}
}
}
if opts.PullRequests {
log.Trace("syncing pull requests and comments")
messenger("repo.migrate.syncing_pulls")
prBatchSize := uploader.MaxBatchInsertSize("pullrequest")
for i := 1; ; i++ {
prs, isEnd, err := downloader.GetNewPullRequests(i, prBatchSize, lastSynced)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing pull requests is not supported, ignored")
break
}
if err := uploader.PatchPullRequests(prs...); err != nil {
return err
}
if opts.Comments {
if !supportAllComments {
// plain comments
allComments := make([]*base.Comment, 0, commentBatchSize)
for _, pr := range prs {
log.Trace("syncing pull request %d's comments", pr.Number)
comments, _, err := downloader.GetNewComments(pr, lastSynced)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing comments is not supported, ignored")
}
allComments = append(allComments, comments...)
if len(allComments) >= commentBatchSize {
if err = uploader.PatchComments(allComments[:commentBatchSize]...); err != nil {
return err
}
allComments = allComments[commentBatchSize:]
}
}
if len(allComments) > 0 {
if err = uploader.PatchComments(allComments...); err != nil {
return err
}
}
}
// sync reviews
allReviews := make([]*base.Review, 0, reviewBatchSize)
for _, pr := range prs {
reviews, err := downloader.GetNewReviews(pr, lastSynced)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("syncing reviews is not supported, ignored")
break
}
allReviews = append(allReviews, reviews...)
if len(allReviews) >= reviewBatchSize {
if err = uploader.PatchReviews(allReviews[:reviewBatchSize]...); err != nil {
return err
}
allReviews = allReviews[reviewBatchSize:]
}
}
if len(allReviews) > 0 {
if err = uploader.PatchReviews(allReviews...); err != nil {
return err
}
}
}
if isEnd {
break
}
}
}
if opts.Comments && supportAllComments {
log.Trace("syncing comments")
for i := 1; ; i++ {
comments, isEnd, err := downloader.GetAllNewComments(i, commentBatchSize, lastSynced)
if err != nil {
return err
}
if err := uploader.PatchComments(comments...); err != nil {
return err
}
if isEnd {
break
}
}
}
return uploader.Finish()
}
// Init migrations service
func Init() error {
// TODO: maybe we can deprecate these legacy ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS/BLOCKED_DOMAINS, use ALLOWED_HOST_LIST/BLOCKED_HOST_LIST instead

View File

@ -17,12 +17,14 @@ import (
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/migration"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/proxy"
repo_module "code.gitea.io/gitea/modules/repository"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/services/migrations"
notify_service "code.gitea.io/gitea/services/notify"
)
@ -222,8 +224,8 @@ func pruneBrokenReferences(ctx context.Context,
return pruneErr
}
// runSync returns true if sync finished without error.
func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bool) {
// runSyncGit returns true if sync git repos finished without error.
func runSyncGit(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bool) {
repoPath := m.Repo.RepoPath()
wikiPath := m.Repo.WikiPath()
timeout := time.Duration(setting.Git.Timeout.Mirror) * time.Second
@ -321,7 +323,7 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
}
log.Trace("SyncMirrors [repo: %-v]: syncing releases with tags...", m.Repo)
if err = repo_module.SyncReleasesWithTags(ctx, m.Repo, gitRepo); err != nil {
if err = repo_module.SyncReleasesWithTags(ctx, m.Repo, gitRepo, false); err != nil {
log.Error("SyncMirrors [repo: %-v]: failed to synchronize tags to releases: %v", m.Repo, err)
}
@ -419,47 +421,103 @@ func runSync(ctx context.Context, m *repo_model.Mirror) ([]*mirrorSyncResult, bo
return parseRemoteUpdateOutput(output, m.GetRemoteName()), true
}
// runSyncMisc runs the sync of Issues, Pull Requests, Reviews, Topics, Releases, Labels, and Milestones.
// It returns true if the sync was successful.
func runSyncMisc(ctx context.Context, m *repo_model.Mirror, lastSynced time.Time) bool {
repo := m.GetRepository(ctx)
remoteURL, remoteErr := git.GetRemoteURL(ctx, repo.RepoPath(), m.GetRemoteName())
if remoteErr != nil {
log.Error("SyncMirrors [repo: %-v]: GetRemoteAddress Error %v", m.Repo, remoteErr)
return false
}
password, ok := remoteURL.User.Password()
if !ok {
password = ""
}
opts := migration.MigrateOptions{
CloneAddr: repo.OriginalURL,
AuthUsername: remoteURL.User.Username(),
AuthPassword: password,
UID: int(repo.OwnerID),
RepoName: repo.Name,
Mirror: true,
LFS: m.LFS,
LFSEndpoint: m.LFSEndpoint,
GitServiceType: repo.OriginalServiceType,
Wiki: repo.HasWiki() && m.SyncWiki,
Issues: m.SyncIssues,
Milestones: m.SyncMilestones,
Labels: m.SyncLabels,
Releases: m.SyncReleases,
Comments: m.SyncComments,
PullRequests: m.SyncPullRequests,
// Topics
// ReleaseAssets
MigrateToRepoID: repo.ID,
MirrorInterval: m.Interval.String(),
}
_, err := migrations.SyncRepository(ctx, m.Repo.MustOwner(ctx), repo, opts, nil, lastSynced)
if err != nil {
log.Error("runSyncMisc [repo: %-v]: failed to run SyncRepository: %v", m.Repo, err)
}
return err == nil
}
// SyncPullMirror starts the sync of the pull mirror and schedules the next run.
func SyncPullMirror(ctx context.Context, repoID int64) bool {
log.Trace("SyncMirrors [repo_id: %v]", repoID)
log.Trace("SyncPullMirror [repo_id: %v]", repoID)
defer func() {
err := recover()
if err == nil {
return
}
// There was a panic whilst syncMirrors...
log.Error("PANIC whilst SyncMirrors[repo_id: %d] Panic: %v\nStacktrace: %s", repoID, err, log.Stack(2))
log.Error("PANIC whilst SyncPullMirror[repo_id: %d] Panic: %v\nStacktrace: %s", repoID, err, log.Stack(2))
}()
m, err := repo_model.GetMirrorByRepoID(ctx, repoID)
if err != nil {
log.Error("SyncMirrors [repo_id: %v]: unable to GetMirrorByRepoID: %v", repoID, err)
log.Error("SyncPullMirror [repo_id: %v]: unable to GetMirrorByRepoID: %v", repoID, err)
return false
}
_ = m.GetRepository(ctx) // force load repository of mirror
// UpdatedUnix will be updated by runSyncGit, but we need to store it here to use it in runSyncMisc
lastSynced := m.UpdatedUnix.AsTime()
ctx, _, finished := process.GetManager().AddContext(ctx, fmt.Sprintf("Syncing Mirror %s/%s", m.Repo.OwnerName, m.Repo.Name))
defer finished()
log.Trace("SyncMirrors [repo: %-v]: Running Sync", m.Repo)
results, ok := runSync(ctx, m)
log.Trace("SyncPullMirror [repo: %-v]: Running Sync Git", m.Repo)
results, ok := runSyncGit(ctx, m)
if !ok {
if err = repo_model.TouchMirror(ctx, m); err != nil {
log.Error("SyncMirrors [repo: %-v]: failed to TouchMirror: %v", m.Repo, err)
log.Error("SyncPullMirror [repo: %-v]: failed to TouchMirror: %v", m.Repo, err)
}
return false
}
log.Trace("SyncMirrors [repo: %-v]: Scheduling next update", m.Repo)
if ok := runSyncMisc(ctx, m, lastSynced); !ok {
if err = repo_model.TouchMirror(ctx, m); err != nil {
log.Error("SyncPullMirror [repo: %-v]: failed to TouchMirror: %v", m.Repo, err)
}
return false
}
log.Trace("SyncPullMirror [repo: %-v]: Scheduling next update", m.Repo)
m.ScheduleNextUpdate()
if err = repo_model.UpdateMirror(ctx, m); err != nil {
log.Error("SyncMirrors [repo: %-v]: failed to UpdateMirror with next update date: %v", m.Repo, err)
log.Error("SyncPullMirror [repo: %-v]: failed to UpdateMirror with next update date: %v", m.Repo, err)
return false
}
gitRepo, err := gitrepo.OpenRepository(ctx, m.Repo)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to OpenRepository: %v", m.Repo, err)
log.Error("SyncPullMirror [repo: %-v]: unable to OpenRepository: %v", m.Repo, err)
return false
}
defer gitRepo.Close()
@ -482,7 +540,7 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool {
if result.oldCommitID == gitShortEmptySha {
commitID, err := gitRepo.GetRefCommitID(result.refName.String())
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to GetRefCommitID [ref_name: %s]: %v", m.Repo, result.refName, err)
log.Error("SyncPullMirror [repo: %-v]: unable to GetRefCommitID [ref_name: %s]: %v", m.Repo, result.refName, err)
continue
}
objectFormat := git.ObjectFormatFromName(m.Repo.ObjectFormatName)
@ -504,17 +562,17 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool {
// Push commits
oldCommitID, err := git.GetFullCommitID(gitRepo.Ctx, gitRepo.Path, result.oldCommitID)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to get GetFullCommitID[%s]: %v", m.Repo, result.oldCommitID, err)
log.Error("SyncPullMirror [repo: %-v]: unable to get GetFullCommitID[%s]: %v", m.Repo, result.oldCommitID, err)
continue
}
newCommitID, err := git.GetFullCommitID(gitRepo.Ctx, gitRepo.Path, result.newCommitID)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to get GetFullCommitID [%s]: %v", m.Repo, result.newCommitID, err)
log.Error("SyncPullMirror [repo: %-v]: unable to get GetFullCommitID [%s]: %v", m.Repo, result.newCommitID, err)
continue
}
commits, err := gitRepo.CommitsBetweenIDs(newCommitID, oldCommitID)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to get CommitsBetweenIDs [new_commit_id: %s, old_commit_id: %s]: %v", m.Repo, newCommitID, oldCommitID, err)
log.Error("SyncPullMirror [repo: %-v]: unable to get CommitsBetweenIDs [new_commit_id: %s, old_commit_id: %s]: %v", m.Repo, newCommitID, oldCommitID, err)
continue
}
@ -525,7 +583,7 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool {
newCommit, err := gitRepo.GetCommit(newCommitID)
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to get commit %s: %v", m.Repo, newCommitID, err)
log.Error("SyncPullMirror [repo: %-v]: unable to get commit %s: %v", m.Repo, newCommitID, err)
continue
}
@ -538,11 +596,11 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool {
NewCommitID: newCommitID,
}, theCommits)
}
log.Trace("SyncMirrors [repo: %-v]: done notifying updated branches/tags - now updating last commit time", m.Repo)
log.Trace("SyncPullMirror [repo: %-v]: done notifying updated branches/tags - now updating last commit time", m.Repo)
isEmpty, err := gitRepo.IsEmpty()
if err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to check empty git repo: %v", m.Repo, err)
log.Error("SyncPullMirror [repo: %-v]: unable to check empty git repo: %v", m.Repo, err)
return false
}
if !isEmpty {
@ -554,12 +612,12 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool {
}
if err = repo_model.UpdateRepositoryUpdatedTime(ctx, m.RepoID, commitDate); err != nil {
log.Error("SyncMirrors [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err)
log.Error("SyncPullMirror [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err)
return false
}
}
log.Trace("SyncMirrors [repo: %-v]: Successfully updated", m.Repo)
log.Trace("SyncPullMirror [repo: %-v]: Successfully updated", m.Repo)
return true
}

View File

@ -195,7 +195,7 @@ func adoptRepository(ctx context.Context, repoPath string, u *user_model.User, r
}
defer gitRepo.Close()
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo, true); err != nil {
return fmt.Errorf("SyncReleasesWithTags: %w", err)
}

View File

@ -204,7 +204,7 @@ func ForkRepository(ctx context.Context, doer, owner *user_model.User, opts Fork
log.Error("Open created git repository failed: %v", err)
} else {
defer gitRepo.Close()
if err := repo_module.SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
if err := repo_module.SyncReleasesWithTags(ctx, repo, gitRepo, false); err != nil {
log.Error("Sync releases from git tags failed: %v", err)
}
}

View File

@ -159,7 +159,7 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
// note: this will greatly improve release (tag) sync
// for pull-mirrors with many tags
repo.IsMirror = opts.Mirror
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
if err = repo_module.SyncReleasesWithTags(ctx, repo, gitRepo, true); err != nil {
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
}

View File

@ -19,7 +19,7 @@
{{template "repo/issue/search" .}}
{{if not .Repository.IsArchived}}
{{if .PageIsIssueList}}
<a class="ui small primary button issue-list-new" href="{{.RepoLink}}/issues/new{{if .NewIssueChooseTemplate}}/choose{{end}}">{{ctx.Locale.Tr "repo.issues.new"}}</a>
<a class="ui small primary button issue-list-new{{if not .AllowsIssues}} disabled{{end}}" href="{{.RepoLink}}/issues/new{{if .NewIssueChooseTemplate}}/choose{{end}}">{{ctx.Locale.Tr "repo.issues.new"}}</a>
{{else}}
<a class="ui small primary button new-pr-button issue-list-new{{if not .PullRequestCtx.Allowed}} disabled{{end}}" href="{{if .PullRequestCtx.Allowed}}{{.Repository.Link}}/compare/{{.Repository.DefaultBranch | PathEscapeSegments}}...{{if ne .Repository.Owner.Name .PullRequestCtx.BaseRepo.Owner.Name}}{{PathEscape .Repository.Owner.Name}}:{{end}}{{.Repository.DefaultBranch | PathEscapeSegments}}{{end}}">{{ctx.Locale.Tr "repo.pulls.new"}}</a>
{{end}}

View File

@ -10,6 +10,9 @@ const lfsSettings = document.getElementById('lfs_settings');
const lfsEndpoint = document.getElementById('lfs_endpoint');
const items = document.querySelectorAll('#migrate_items input[type=checkbox]');
// services that supports migration items with mirroring
const allowedServiceTypes = [2]; // 2 = GitHub service type
export function initRepoMigration() {
checkAuth();
setLFSSettingsVisibility();
@ -48,7 +51,7 @@ function checkItems(tokenAuth) {
} else {
enableItems = user?.value !== '' || pass?.value !== '';
}
if (enableItems && Number(service?.value) > 1) {
if (enableItems && Number(service?.value) > 1 && !allowedServiceTypes.includes(Number(service?.value))) {
if (mirror?.checked) {
for (const item of items) {
item.disabled = item.name !== 'wiki';