XJAW762UV4MWZSHDLIVIMI5HL2BWYVNZGLFZY4IW7BEE3E2KN3CAC var d DirEntryvar err errordata, d.IsDirectory, err = mapValue(uint16LE, func(n uint16) bool {return n&0x200 != 0})(data)if err != err {return data, d, err
if len(data) < 3 {return data, DirEntry{}, fmt.Errorf("too short to be a valid directory entry: %d bytes", len(data))
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

// markedChange records the association between a fast-export mark and the
// Pijul state hash it was assigned to.
type markedChange struct {
	mark  int
	state string
}

// Marks allocates fast-import mark numbers and remembers which Pijul state
// each mark was assigned to, so that repeated exports can be incremental.
type Marks struct {
	marks   int            // highest mark handed out so far
	changes []markedChange // every mark/state pair, in allocation order
}

// Next returns the next unused mark number.
func (m *Marks) Next() int {
	m.marks++
	return m.marks
}

// MarkChanges assigns a mark to every entry in changes. Changes whose state
// already has a mark (loaded by Import) reuse it and are flagged as already
// exported; all others get fresh marks, which are recorded so Export can
// write them back out.
func (m *Marks) MarkChanges(changes []change) {
	// Make a lookup table for existing marks, and make sure we don't
	// reuse their numbers for new changes.
	stateToMark := make(map[string]int, len(m.changes))
	for _, mc := range m.changes {
		stateToMark[mc.state] = mc.mark
		if mc.mark > m.marks {
			m.marks = mc.mark
		}
	}
	for i := range changes {
		if mark, ok := stateToMark[changes[i].State]; ok {
			changes[i].mark = mark
			changes[i].exported = true
		} else {
			changes[i].mark = m.Next()
			m.changes = append(m.changes, markedChange{
				mark:  changes[i].mark,
				state: changes[i].State,
			})
		}
	}
}

// Import loads a marks file analogous to those used by git fast-export's
// --import-marks and --export-marks switches. Each line has the form
// ":<mark> <state>".
func (m *Marks) Import(filename string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	br := bufio.NewReader(f)
	for {
		var mc markedChange
		n, err := fmt.Fscanf(br, ":%d %s\n", &mc.mark, &mc.state)
		if n == 2 {
			// Both fields were scanned; only the trailing newline can have
			// failed (e.g. no newline at end of file), so keep the entry.
			m.changes = append(m.changes, mc)
			if err == nil {
				continue
			}
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			// Clean end of input. Fscanf reports ErrUnexpectedEOF when EOF
			// interrupts the literal ':' at the start of the format, and
			// io.EOF when it interrupts a verb, so accept both.
			return nil
		}
		if err != nil {
			return err
		}
	}
}

// Export writes the accumulated marks to filename in the format read by
// Import, reporting the first write error encountered.
func (m *Marks) Export(filename string) error {
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	w := bufio.NewWriter(f)
	for _, c := range m.changes {
		if _, err := fmt.Fprintf(w, ":%d %s\n", c.mark, c.state); err != nil {
			f.Close()
			return err
		}
	}
	if err := w.Flush(); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
package main

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"pijul-go"
	"strings"
	"time"
)

var (
	repo     = flag.String("repo", ".", "path of the repository to export")
	channel  = flag.String("channel", "main", "which channel to export")
	branch   = flag.String("branch", "", "destination branch in Git (default is the same as channel)")
	markFile = flag.String("marks", "", "path to file to store persistent marks")
)

// formatTime renders t in the "<unix-seconds> <utc-offset>" form used on
// git fast-import committer lines.
func formatTime(t time.Time) string {
	return fmt.Sprintf("%d %s", t.Unix(), t.Format("-0700"))
}

// change mirrors one entry of `pijul log --output-format=json`, plus
// bookkeeping for the export: the fast-import mark assigned to the change
// and whether a previous run already emitted it.
type change struct {
	Hash      string    `json:"hash"`
	Authors   []string  `json:"authors"`
	Timestamp time.Time `json:"timestamp"`
	Message   string    `json:"message"`
	State     string    `json:"state"`

	mark     int  // fast-import mark assigned to this change
	exported bool // true if an earlier export already emitted it
}

// printErrorAndExit reports err on stderr (preferring the child process's
// captured stderr when err came from exec) and terminates with status 2.
func printErrorAndExit(description string, err error) {
	msg := err.Error()
	if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 {
		msg = string(err.Stderr)
	}
	fmt.Fprintln(os.Stderr, description, msg)
	os.Exit(2)
}

func main() {
	flag.Parse()
	if *branch == "" {
		*branch = *channel
	}

	// Pass --channel so the -channel flag actually selects which channel is
	// exported; previously it only affected the destination branch name.
	logBytes, err := exec.Command("pijul", "log", "--state", "--channel", *channel, "--repository", *repo, "--output-format=json").Output()
	if err != nil {
		printErrorAndExit("Error running pijul log:", err)
	}
	var changes []change
	err = json.Unmarshal(logBytes, &changes)
	if err != nil {
		printErrorAndExit("Error parsing pijul log output:", err)
	}

	// Reverse the list of changes: pijul log is newest-first, but the
	// fast-import stream must be oldest-first.
	for i, j := 0, len(changes)-1; i < j; i, j = i+1, j-1 {
		changes[i], changes[j] = changes[j], changes[i]
	}

	stream := new(FastExportStream)
	if *markFile != "" {
		if err := stream.marks.Import(*markFile); err != nil {
			printErrorAndExit("Error loading marks file:", err)
		}
	}
	stream.marks.MarkChanges(changes)

	pristine := pijul.NewGraph()
	for changeIndex, c := range changes {
		// Changes are stored as .pijul/changes/<first 2 chars>/<rest>.change.
		changeBytes, err := os.ReadFile(filepath.Join(*repo, ".pijul", "changes", c.Hash[:2], c.Hash[2:]+".change"))
		if err != nil {
			printErrorAndExit(fmt.Sprintf("error loading change %s:", c.Hash), err)
		}
		change, err := pijul.DeserializeChange(changeBytes)
		if err != nil {
			printErrorAndExit(fmt.Sprintf("error deserializing change %s:", c.Hash), err)
		}
		hash, err := pijul.HashFromBase32(c.Hash)
		if err != nil {
			printErrorAndExit("error parsing hash:", err)
		}
		// Every change is applied to the in-memory pristine, even ones that
		// were already exported, so later snapshots are complete.
		err = pristine.ApplyChange(hash, change)
		if err != nil {
			printErrorAndExit(fmt.Sprintf("error applying change %s:", c.Hash), err)
		}

		if c.exported {
			continue
		}

		var commit Commit
		commit.Mark = c.mark
		if len(c.Authors) > 0 && strings.Contains(c.Authors[0], "<") {
			// Using the author from the log lets us use the result of pijul
			// looking up the identity.
			commit.Committer = c.Authors[0]
		} else if len(change.Authors) > 0 {
			a := change.Authors[0]
			name := a["full_name"]
			if name == "" {
				name = a["name"]
			}
			if name == "" {
				name = a["key"]
			}
			commit.Committer = name + " <" + a["email"] + ">"
		} else {
			commit.Committer = "<>"
		}
		commit.Timestamp = change.Timestamp
		commit.Branch = *branch
		message := change.Message
		if change.Description != "" {
			message += "\n\n" + change.Description
		}
		commit.Message = message
		if changeIndex > 0 {
			commit.From = changes[changeIndex-1].mark
		}
		// To specify the content for the commit, we remove everything
		// and then add back in all the files that are present after the change.
		commit.DeleteAll = true
		root, err := pristine.RootDirectory()
		if err != nil {
			printErrorAndExit("error getting root directory:", err)
		}
		err = addFiles(&commit, stream, root, "")
		if err != nil {
			printErrorAndExit("error outputting files:", err)
		}
		stream.AddCommit(commit)
	}

	if err := stream.WriteTo(os.Stdout); err != nil {
		printErrorAndExit("Error writing output stream:", err)
	}
	if *markFile != "" {
		if err := stream.marks.Export(*markFile); err != nil {
			printErrorAndExit("Error writing marks file:", err)
		}
	}
}

// addFiles walks the directory rooted at dirInode in the pristine and adds a
// file-modify entry (blob + path) to commit for every regular file found,
// recursing into subdirectories. pathPrefix is the slash-separated path of
// dirInode relative to the repository root ("" for the root itself).
func addFiles(commit *Commit, stream *FastExportStream, dirInode *pijul.Block, pathPrefix string) error {
	entries, err := pijul.ReadDir(dirInode)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if e.IsDirectory {
			err = addFiles(commit, stream, e.Inode, path.Join(pathPrefix, e.Name))
			if err != nil {
				return err
			}
		} else {
			buf := new(bytes.Buffer)
			err = pijul.OutputFile(buf, e.Inode)
			if err != nil {
				return err
			}
			// AddBlob deduplicates identical content and returns the mark.
			b := stream.AddBlob(buf.Bytes())
			commit.Modifications = append(commit.Modifications, FileModify{Blob: b, Path: path.Join(pathPrefix, e.Name)})
		}
	}
	return nil
}
package main

import (
	"crypto/sha512"
	"fmt"
	"io"
	"strings"
	"time"
)

// FileModify is a single file-modify ("M") command inside a commit,
// referring to a previously emitted blob by its mark.
type FileModify struct {
	Blob int
	Path string
}

// quotePath returns p in the C-style quoted form required by git
// fast-import when the path contains a double quote, backslash, or newline
// (which would otherwise corrupt the stream). Ordinary paths are returned
// unchanged, so existing output is unaffected.
func quotePath(p string) string {
	if !strings.ContainsAny(p, "\"\\\n") {
		return p
	}
	var b strings.Builder
	b.WriteByte('"')
	for i := 0; i < len(p); i++ {
		switch c := p[i]; c {
		case '"':
			b.WriteString("\\\"")
		case '\\':
			b.WriteString("\\\\")
		case '\n':
			b.WriteString("\\n")
		default:
			b.WriteByte(c)
		}
	}
	b.WriteByte('"')
	return b.String()
}

// WriteTo emits the file-modify command in fast-import format. (git
// fast-import accepts 644 as an abbreviation for mode 100644.)
func (f FileModify) WriteTo(w io.Writer) error {
	_, err := fmt.Fprintf(w, "M 644 :%d %s\n", f.Blob, quotePath(f.Path))
	return err
}

// Commit is one commit command in the fast-import stream.
type Commit struct {
	Mark          int
	Branch        string
	Committer     string
	Timestamp     time.Time
	Message       string
	From          int  // mark of the parent commit; 0 for the first commit
	DeleteAll     bool // emit "deleteall" before the file modifications
	Modifications []FileModify
}

// WriteTo emits the commit in fast-import format.
func (c Commit) WriteTo(w io.Writer) error {
	if _, err := fmt.Fprintln(w, "commit refs/heads/"+c.Branch); err != nil {
		return err
	}
	if c.Mark != 0 {
		if _, err := fmt.Fprintf(w, "mark :%d\n", c.Mark); err != nil {
			return err
		}
	}
	if _, err := fmt.Fprintln(w, "committer", c.Committer, formatTime(c.Timestamp)); err != nil {
		return err
	}
	// The byte count covers the message only; the newline written after it
	// is the optional LF that terminates a data block.
	if _, err := fmt.Fprintln(w, "data", len(c.Message)); err != nil {
		return err
	}
	if _, err := fmt.Fprintln(w, c.Message); err != nil {
		return err
	}
	if c.From != 0 {
		if _, err := fmt.Fprintf(w, "from :%d\n", c.From); err != nil {
			return err
		}
	}
	if c.DeleteAll {
		if _, err := fmt.Fprintln(w, "deleteall"); err != nil {
			return err
		}
	}
	for _, m := range c.Modifications {
		if err := m.WriteTo(w); err != nil {
			return err
		}
	}
	if _, err := fmt.Fprintln(w); err != nil {
		return err
	}
	return nil
}

// Blob is one blob command in the fast-import stream.
type Blob struct {
	Mark int
	Data []byte
}

// WriteTo emits the blob in fast-import format.
func (b Blob) WriteTo(w io.Writer) error {
	if _, err := fmt.Fprintln(w, "blob"); err != nil {
		return err
	}
	if b.Mark != 0 {
		if _, err := fmt.Fprintf(w, "mark :%d\n", b.Mark); err != nil {
			return err
		}
	}
	if _, err := fmt.Fprintln(w, "data", len(b.Data)); err != nil {
		return err
	}
	if _, err := w.Write(b.Data); err != nil {
		return err
	}
	if _, err := fmt.Fprintln(w); err != nil {
		return err
	}
	return nil
}

// A FastExportStream is an in-memory representation of a fast-export stream.
type FastExportStream struct {
	Commits []Commit
	Blobs   []Blob
	marks   Marks
	// blobIndex maps the SHA-512 of blob content to its mark, so identical
	// file contents are emitted only once.
	blobIndex map[[64]byte]int
}

// AddCommit appends c to the stream, assigning it a fresh mark if it
// doesn't already have one.
func (f *FastExportStream) AddCommit(c Commit) {
	if c.Mark == 0 {
		c.Mark = f.marks.Next()
	}
	f.Commits = append(f.Commits, c)
}

// ReverseCommits reverses the order of the accumulated commits in place.
func (f *FastExportStream) ReverseCommits() {
	for i, j := 0, len(f.Commits)-1; j > i; i, j = i+1, j-1 {
		f.Commits[i], f.Commits[j] = f.Commits[j], f.Commits[i]
	}
}

// WriteTo writes the whole stream: all blobs first (so commits can refer to
// them by mark), then all commits.
func (f *FastExportStream) WriteTo(w io.Writer) error {
	for _, b := range f.Blobs {
		if err := b.WriteTo(w); err != nil {
			return err
		}
	}
	for _, c := range f.Commits {
		if err := c.WriteTo(w); err != nil {
			return err
		}
	}
	return nil
}

// AddBlob adds a blob to the stream and returns its mark. Content already
// present in the stream is not duplicated; its existing mark is returned.
func (f *FastExportStream) AddBlob(data []byte) int {
	hash := sha512.Sum512(data)
	if mark, ok := f.blobIndex[hash]; ok {
		return mark
	}
	b := Blob{
		Mark: f.marks.Next(),
		Data: data,
	}
	f.Blobs = append(f.Blobs, b)
	if f.blobIndex == nil {
		f.blobIndex = make(map[[64]byte]int)
	}
	f.blobIndex[hash] = b.Mark
	return b.Mark
}