mirror of
https://codeberg.org/guix/guix.git
synced 2026-04-28 06:34:05 +00:00
Compare commits
5 Commits
emacs-team
...
wip-ipfs-s
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
79b0f72a9e | ||
|
|
5fdb66f176 | ||
|
|
b900520019 | ||
|
|
7ad64d8453 | ||
|
|
1517fb1a77 |
@@ -77,6 +77,7 @@ MODULES = \
|
||||
guix/discovery.scm \
|
||||
guix/git-download.scm \
|
||||
guix/hg-download.scm \
|
||||
guix/json.scm \
|
||||
guix/swh.scm \
|
||||
guix/monads.scm \
|
||||
guix/monad-repl.scm \
|
||||
@@ -100,6 +101,7 @@ MODULES = \
|
||||
guix/cve.scm \
|
||||
guix/workers.scm \
|
||||
guix/zlib.scm \
|
||||
guix/ipfs.scm \
|
||||
guix/build-system.scm \
|
||||
guix/build-system/android-ndk.scm \
|
||||
guix/build-system/ant.scm \
|
||||
@@ -383,6 +385,7 @@ SCM_TESTS = \
|
||||
tests/cve.scm \
|
||||
tests/workers.scm \
|
||||
tests/zlib.scm \
|
||||
tests/ipfs.scm \
|
||||
tests/file-systems.scm \
|
||||
tests/uuid.scm \
|
||||
tests/system.scm \
|
||||
|
||||
@@ -8470,6 +8470,15 @@ caching of the archives before they are sent to clients---see below for
|
||||
details. The @command{guix weather} command provides a handy way to
|
||||
check what a server provides (@pxref{Invoking guix weather}).
|
||||
|
||||
@cindex peer-to-peer, substitute distribution
|
||||
@cindex distributed storage, of substitutes
|
||||
@cindex IPFS, for substitutes
|
||||
It is also possible to publish substitutes over @uref{https://ipfs.io, IPFS},
|
||||
a distributed, peer-to-peer storage mechanism. To enable it, pass the
|
||||
@option{--ipfs} option alongside @option{--cache}, and make sure you're
|
||||
running @command{ipfs daemon}. Capable clients will then be able to choose
|
||||
whether to fetch substitutes over HTTP or over IPFS.
|
||||
|
||||
As a bonus, @command{guix publish} also serves as a content-addressed
|
||||
mirror for source files referenced in @code{origin} records
|
||||
(@pxref{origin Reference}). For instance, assuming @command{guix
|
||||
@@ -8560,6 +8569,30 @@ thread per CPU core is created, but this can be customized. See
|
||||
When @option{--ttl} is used, cached entries are automatically deleted
|
||||
when they have expired.
|
||||
|
||||
@item --ipfs[=@var{gateway}]
|
||||
When used in conjunction with @option{--cache}, instruct @command{guix
|
||||
publish} to publish substitutes over the @uref{https://ipfs.io, IPFS
|
||||
distributed data store} in addition to HTTP.
|
||||
|
||||
@quotation Note
|
||||
As of version @value{VERSION}, IPFS support is experimental. You're welcome
|
||||
to share your experience with the developers by emailing
|
||||
@email{guix-devel@@gnu.org}!
|
||||
@end quotation
|
||||
|
||||
The IPFS HTTP interface must be reachable at @var{gateway}, by default
|
||||
@code{localhost:5001}. To get it up and running, it is usually enough to
|
||||
install IPFS and start the IPFS daemon:
|
||||
|
||||
@example
|
||||
$ guix package -i go-ipfs
|
||||
$ ipfs init
|
||||
$ ipfs daemon
|
||||
@end example
|
||||
|
||||
For more information on how to get started with IPFS, please refer to the
|
||||
@uref{https://docs.ipfs.io/introduction/usage/, IPFS documentation}.
|
||||
|
||||
@item --workers=@var{N}
|
||||
When @option{--cache} is used, request the allocation of @var{N} worker
|
||||
threads to ``bake'' archives.
|
||||
|
||||
250
guix/ipfs.scm
Normal file
250
guix/ipfs.scm
Normal file
@@ -0,0 +1,250 @@
|
||||
;;; GNU Guix --- Functional package management for GNU
|
||||
;;; Copyright © 2018 Ludovic Courtès <ludo@gnu.org>
|
||||
;;;
|
||||
;;; This file is part of GNU Guix.
|
||||
;;;
|
||||
;;; GNU Guix is free software; you can redistribute it and/or modify it
|
||||
;;; under the terms of the GNU General Public License as published by
|
||||
;;; the Free Software Foundation; either version 3 of the License, or (at
|
||||
;;; your option) any later version.
|
||||
;;;
|
||||
;;; GNU Guix is distributed in the hope that it will be useful, but
|
||||
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
;;; GNU General Public License for more details.
|
||||
;;;
|
||||
;;; You should have received a copy of the GNU General Public License
|
||||
;;; along with GNU Guix. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
;; Module declaration for the IPFS client bindings.
(define-module (guix ipfs)
  ;; Guix-internal helpers: JSON record mapping, base64, and 'dump-port'.
  #:use-module (guix json)
  #:use-module (guix base64)
  #:use-module ((guix build utils) #:select (dump-port))
  ;; SRFIs: list folds, multiple values, 'cut'.
  #:use-module (srfi srfi-1)
  #:use-module (srfi srfi-11)
  #:use-module (srfi srfi-26)
  ;; Binary I/O and bytevectors (R6RS).
  #:use-module (rnrs io ports)
  #:use-module (rnrs bytevectors)
  #:use-module (ice-9 match)
  #:use-module (ice-9 ftw)
  ;; HTTP client machinery for talking to the gateway.
  #:use-module (web uri)
  #:use-module (web client)
  #:use-module (web response)
  #:export (%ipfs-base-url
            add-file
            add-file-tree
            restore-file-tree

            content?
            content-name
            content-hash
            content-size

            add-empty-directory
            add-to-directory
            read-contents
            publish-name))
|
||||
|
||||
;;; Commentary:
|
||||
;;;
|
||||
;;; This module implements bindings for the HTTP interface of the IPFS
|
||||
;;; gateway, documented here: <https://docs.ipfs.io/reference/api/http/>. It
|
||||
;;; allows you to add and retrieve files over IPFS, and a few other things.
|
||||
;;;
|
||||
;;; Code:
|
||||
|
||||
;; Base URL of the IPFS gateway's HTTP API.  A parameter so that callers
;; (e.g. 'guix publish --ipfs=GATEWAY') can rebind it dynamically.
(define %ipfs-base-url
  (make-parameter "http://localhost:5001"))
|
||||
|
||||
(define* (call url decode #:optional (method http-post)
               #:key body (false-if-404? #t) (headers '()))
  "Invoke the endpoint at URL using METHOD.  Decode the resulting JSON body
using DECODE, a one-argument procedure that takes an input port; when DECODE
is false, return the input port.  When FALSE-IF-404? is true, return #f upon
404 responses."
  (let*-values (((response port)
                 (method url #:streaming? #t
                         #:body body
                         ;; Always pass "Connection: close".
                         #:keep-alive? #f
                         #:headers `((connection close)
                                     ,@headers))))
    (let ((code (response-code response)))
      (cond ((= code 200)
             ;; Success: either hand the port back, or decode and close it.
             (if decode
                 (let ((value (decode port)))
                   (close-port port)
                   value)
                 port))
            ((and (= code 404) false-if-404?)
             ;; "Not found" mapped to #f when the caller asked for it.
             (close-port port)
             #f)
            (else
             (close-port port)
             (throw 'ipfs-error url response))))))
|
||||
|
||||
;; Result of a file addition ("/api/v0/add").
(define-json-mapping <content> make-content content?
  json->content
  (name  content-name "Name")
  (hash  content-hash "Hash")
  (bytes content-bytes "Bytes")
  (size  content-size "Size" string->number))

;; Result of a 'patch/add-link' operation.
(define-json-mapping <directory> make-directory directory?
  json->directory
  (hash  directory-hash "Hash")
  (links directory-links "Links" json->links))

;; A "link", as found in a directory object.
(define-json-mapping <link> make-link link?
  json->link
  (name link-name "Name")
  (hash link-hash "Hash")
  (size link-size "Size" string->number))

;; A "binding", also known as a "name" (IPNS).
(define-json-mapping <binding> make-binding binding?
  json->binding
  (name  binding-name "Name")
  (value binding-value "Value"))

(define (json->links json)
  ;; The "Links" field may be JSON 'null', which the parser yields as #f;
  ;; treat that as the empty list.
  (if json
      (map json->link json)
      '()))
|
||||
|
||||
(define %multipart-boundary
  ;; XXX: We might want to find a more reliable boundary.
  (string-append (make-string 24 #\-) "2698127afd7425a6"))

(define (bytevector->form-data bv port)
  "Write to PORT a 'multipart/form-data' representation of BV."
  ;; Opening boundary and part headers for a single binary part.
  (define part-header
    (string-append "--" %multipart-boundary "\r\n"
                   "Content-Disposition: form-data\r\n"
                   "Content-Type: application/octet-stream\r\n\r\n"))
  ;; Closing boundary, terminating the multipart body.
  (define closing
    (string-append "\r\n--" %multipart-boundary "--\r\n"))
  (display part-header port)
  (put-bytevector port bv)
  (display closing port))
|
||||
|
||||
(define* (add-data data #:key (name "file.txt") recursive?)
  "Add DATA, a bytevector, to IPFS.  Return a content object representing it."
  ;; Endpoint for "/api/v0/add"; NAME is URI-encoded since it may contain
  ;; arbitrary file-name characters.
  (define endpoint
    (string-append (%ipfs-base-url)
                   "/api/v0/add?arg=" (uri-encode name)
                   "&recursive=" (if recursive? "true" "false")))
  (call endpoint json->content
        #:headers `((content-type
                     . (multipart/form-data
                        (boundary . ,%multipart-boundary))))
        ;; Serialize DATA as the sole part of a multipart/form-data body.
        #:body (call-with-bytevector-output-port
                (lambda (port)
                  (bytevector->form-data data port)))))
|
||||
|
||||
(define (not-dot? entry)
  ;; True unless ENTRY is one of the special "." or ".." directory entries.
  (and (not (string=? entry "."))
       (not (string=? entry ".."))))
|
||||
|
||||
(define (file-tree->sexp file)
  "Add FILE, recursively, to the IPFS, and return an sexp representing the
directory's tree structure.

Unlike IPFS's own \"UnixFS\" structure, this format preserves exactly what we
need: like the nar format, it preserves the executable bit, but does not save
the mtime or other Unixy attributes irrelevant in the store."
  ;; The natural approach would be to insert each directory listing as an
  ;; object of its own in IPFS.  However, this does not buy us much in terms
  ;; of deduplication, but it does cause a lot of extra round trips when
  ;; fetching it.  Thus, this sexp is "flat" in that only the leaves are
  ;; inserted into the IPFS.
  (let ((st (lstat file)))
    (match (stat:type st)
      ('directory
       (let* ((parent  file)
              ;; One '(entry NAME SUBTREE)' per directory entry, recursing.
              (entries (map (lambda (leaf)
                              `(entry ,leaf
                                      ,(file-tree->sexp
                                        (string-append parent "/" leaf))))
                            (scandir file not-dot?)))
              ;; Total size: sum of the sizes recorded in each subtree.
              (total   (fold (lambda (entry sum)
                               (match entry
                                 (('entry name (kind value size))
                                  (+ sum size))))
                             0
                             entries)))
         `(directory ,entries ,total)))
      ('symlink
       ;; Symlinks carry no content of their own; size is zero.
       `(symlink ,(readlink file) 0))
      ('regular
       ;; Regular files are uploaded individually; the executable bit (any
       ;; owner-execute permission) selects the tag.
       (let ((size (stat:size st)))
         (if (zero? (logand (stat:mode st) #o100))
             `(file ,(content-name (add-file file)) ,size)
             `(executable ,(content-name (add-file file)) ,size)))))))
|
||||
|
||||
(define (add-file-tree file)
  "Add FILE to the IPFS, recursively, using our own canonical directory
format.  Return the resulting content object."
  ;; Wrap the tree in a versioned envelope so the format can evolve.
  (let ((tree `(file-tree (version 0)
                          ,(file-tree->sexp file))))
    (add-data (string->utf8 (object->string tree)))))
|
||||
|
||||
(define (restore-file-tree object file)
  "Restore to FILE the tree pointed to by OBJECT."
  ;; Fetch and parse the versioned envelope produced by 'add-file-tree'.
  (define (fetch-tree)
    (match (read (read-contents object))
      (('file-tree ('version 0) tree)
       tree)))
  (let restore ((tree (fetch-tree))
                (file file))
    (match tree
      (('file object size)
       ;; Plain file: stream the object's bytes into FILE.
       (call-with-output-file file
         (lambda (output)
           (dump-port (read-contents object) output))))
      (('executable object size)
       ;; Same as a plain file, but mark it executable afterwards.
       (call-with-output-file file
         (lambda (output)
           (dump-port (read-contents object) output)))
       (chmod file #o555))
      (('symlink target size)
       (symlink target file))
      (('directory (('entry names entries) ...) size)
       ;; Recreate the directory, then restore each entry under it.
       (mkdir file)
       (for-each restore
                 entries
                 (map (cut string-append file "/" <>) names))))))
|
||||
|
||||
(define* (add-file file #:key (name (basename file)))
  "Add FILE under NAME to the IPFS and return a content object for it."
  (let ((contents (call-with-input-file file get-bytevector-all)))
    ;; 'get-bytevector-all' returns the EOF object for an empty file;
    ;; upload an empty bytevector in that case.
    (add-data (if (eof-object? contents) #vu8() contents)
              #:name name)))
|
||||
|
||||
(define* (add-empty-directory #:key (name "directory"))
  "Return a content object for an empty directory."
  (add-data #vu8() #:recursive? #t #:name name))

(define* (add-to-directory directory file name)
  "Add FILE to DIRECTORY under NAME, and return the resulting directory.
DIRECTORY and FILE must be hashes identifying objects in the IPFS store."
  (call (string-append (%ipfs-base-url)
                       "/api/v0/object/patch/add-link?arg="
                       (uri-encode directory)
                       "&arg=" (uri-encode name) "&arg=" (uri-encode file)
                       "&create=true")
        json->directory))

(define* (read-contents object #:key offset length)
  "Return an input port to read the content of OBJECT from.  When OFFSET
and/or LENGTH are given, forward them to the gateway so that only LENGTH
bytes starting at byte OFFSET are returned."
  ;; Previously OFFSET and LENGTH were accepted but silently ignored; the
  ;; "/api/v0/cat" endpoint supports 'offset' and 'length' query parameters,
  ;; so pass them along when provided.
  (call (string-append (%ipfs-base-url)
                       "/api/v0/cat?arg=" object
                       (if offset
                           (string-append "&offset="
                                          (number->string offset))
                           "")
                       (if length
                           (string-append "&length="
                                          (number->string length))
                           ""))
        #f))

(define* (publish-name object)
  "Publish OBJECT under the current peer ID."
  (call (string-append (%ipfs-base-url)
                       "/api/v0/name/publish?arg=" object)
        json->binding))
|
||||
63
guix/json.scm
Normal file
63
guix/json.scm
Normal file
@@ -0,0 +1,63 @@
|
||||
;;; GNU Guix --- Functional package management for GNU
|
||||
;;; Copyright © 2018 Ludovic Courtès <ludo@gnu.org>
|
||||
;;;
|
||||
;;; This file is part of GNU Guix.
|
||||
;;;
|
||||
;;; GNU Guix is free software; you can redistribute it and/or modify it
|
||||
;;; under the terms of the GNU General Public License as published by
|
||||
;;; the Free Software Foundation; either version 3 of the License, or (at
|
||||
;;; your option) any later version.
|
||||
;;;
|
||||
;;; GNU Guix is distributed in the hope that it will be useful, but
|
||||
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
;;; GNU General Public License for more details.
|
||||
;;;
|
||||
;;; You should have received a copy of the GNU General Public License
|
||||
;;; along with GNU Guix. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
;; Tools to map JSON objects onto SRFI-9 records; see Commentary below.
(define-module (guix json)
  #:use-module (json)              ;guile-json: json->scm & friends
  #:use-module (srfi srfi-9)       ;record types
  #:export (define-json-mapping))
|
||||
|
||||
;;; Commentary:
|
||||
;;;
|
||||
;;; This module provides tools to define mappings from JSON objects to SRFI-9
|
||||
;;; records. This is useful when writing bindings to HTTP APIs.
|
||||
;;;
|
||||
;;; Code:
|
||||
|
||||
(define-syntax-rule (define-json-reader json->record ctor spec ...)
  "Define JSON->RECORD as a procedure that converts a JSON representation,
read from a port, string, or hash table, into a record created by CTOR and
following SPEC, a series of field specifications."
  (define (json->record input)
    ;; Normalize INPUT to the hash-table representation used by guile-json.
    (let ((table (cond ((port? input)       (json->scm input))
                       ((string? input)     (json-string->scm input))
                       ((hash-table? input) input))))
      ;; Three spec shapes: (FIELD KEY JSON->VALUE) converts the raw value,
      ;; (FIELD KEY) looks up KEY verbatim, and (FIELD) derives the key from
      ;; the field name itself.
      (let-syntax ((extract-field (syntax-rules ()
                                    ((_ table (field key json->value))
                                     (json->value (hash-ref table key)))
                                    ((_ table (field key))
                                     (hash-ref table key))
                                    ((_ table (field))
                                     (hash-ref table
                                               (symbol->string 'field))))))
        (ctor (extract-field table spec) ...)))))
|
||||
|
||||
(define-syntax-rule (define-json-mapping rtd ctor pred json->record
                      (field getter spec ...) ...)
  "Define RTD as a record type with the given FIELDs and GETTERs, à la SRFI-9,
and define JSON->RECORD as a conversion from JSON to a record of this type."
  (begin
    ;; The SRFI-9 record type itself.
    (define-record-type rtd
      (ctor field ...)
      pred
      (field getter) ...)

    ;; The JSON-to-record converter, built from the per-field SPECs.
    (define-json-reader json->record ctor
      (field spec ...) ...)))
|
||||
@@ -59,6 +59,7 @@
|
||||
#:use-module ((guix build utils)
|
||||
#:select (dump-port mkdir-p find-files))
|
||||
#:use-module ((guix build syscalls) #:select (set-thread-name))
|
||||
#:use-module ((guix ipfs) #:prefix ipfs:)
|
||||
#:export (%public-key
|
||||
%private-key
|
||||
|
||||
@@ -78,6 +79,8 @@ Publish ~a over HTTP.\n") %store-directory)
|
||||
compress archives at LEVEL"))
|
||||
(display (G_ "
|
||||
-c, --cache=DIRECTORY cache published items to DIRECTORY"))
|
||||
(display (G_ "
|
||||
--ipfs[=GATEWAY] publish items over IPFS via GATEWAY"))
|
||||
(display (G_ "
|
||||
--workers=N use N workers to bake items"))
|
||||
(display (G_ "
|
||||
@@ -168,6 +171,10 @@ compression disabled~%"))
|
||||
(option '(#\c "cache") #t #f
|
||||
(lambda (opt name arg result)
|
||||
(alist-cons 'cache arg result)))
|
||||
(option '("ipfs") #f #t
|
||||
(lambda (opt name arg result)
|
||||
(alist-cons 'ipfs (or arg (ipfs:%ipfs-base-url))
|
||||
result)))
|
||||
(option '("workers") #t #f
|
||||
(lambda (opt name arg result)
|
||||
(alist-cons 'workers (string->number* arg)
|
||||
@@ -237,12 +244,15 @@ compression disabled~%"))
|
||||
|
||||
(define* (narinfo-string store store-path key
|
||||
#:key (compression %no-compression)
|
||||
(nar-path "nar") file-size)
|
||||
(nar-path "nar") file-size ipfs)
|
||||
"Generate a narinfo key/value string for STORE-PATH; an exception is raised
|
||||
if STORE-PATH is invalid. Produce a URL that corresponds to COMPRESSION. The
|
||||
narinfo is signed with KEY. NAR-PATH specifies the prefix for nar URLs.
|
||||
|
||||
Optionally, FILE-SIZE can specify the size in bytes of the compressed NAR; it
|
||||
informs the client of how much needs to be downloaded."
|
||||
informs the client of how much needs to be downloaded.
|
||||
|
||||
When IPFS is true, it is the IPFS object identifier for STORE-PATH."
|
||||
(let* ((path-info (query-path-info store store-path))
|
||||
(compression (actual-compression store-path compression))
|
||||
(url (encode-and-join-uri-path
|
||||
@@ -295,7 +305,12 @@ References: ~a~%~a"
|
||||
(apply throw args))))))
|
||||
(signature (base64-encode-string
|
||||
(canonical-sexp->string (signed-string info)))))
|
||||
(format #f "~aSignature: 1;~a;~a~%" info (gethostname) signature)))
|
||||
(format #f "~aSignature: 1;~a;~a~%~a" info (gethostname) signature
|
||||
|
||||
;; Append IPFS info below the signed part.
|
||||
(if ipfs
|
||||
(string-append "IPFS: " ipfs "\n")
|
||||
""))))
|
||||
|
||||
(define* (not-found request
|
||||
#:key (phrase "Resource not found")
|
||||
@@ -406,10 +421,12 @@ items. Failing that, we could eventually have to recompute them and return
|
||||
(define* (render-narinfo/cached store request hash
|
||||
#:key ttl (compression %no-compression)
|
||||
(nar-path "nar")
|
||||
cache pool)
|
||||
cache pool ipfs?)
|
||||
"Respond to the narinfo request for REQUEST. If the narinfo is available in
|
||||
CACHE, then send it; otherwise, return 404 and \"bake\" that nar and narinfo
|
||||
requested using POOL."
|
||||
requested using POOL.
|
||||
|
||||
When IPFS? is true, additionally publish binaries over IPFS."
|
||||
(define (delete-entry narinfo)
|
||||
;; Delete NARINFO and the corresponding nar from CACHE.
|
||||
(let ((nar (string-append (string-drop-right narinfo
|
||||
@@ -447,7 +464,8 @@ requested using POOL."
|
||||
(bake-narinfo+nar cache item
|
||||
#:ttl ttl
|
||||
#:compression compression
|
||||
#:nar-path nar-path)))
|
||||
#:nar-path nar-path
|
||||
#:ipfs? ipfs?)))
|
||||
|
||||
(when ttl
|
||||
(single-baker 'cache-cleanup
|
||||
@@ -465,7 +483,7 @@ requested using POOL."
|
||||
|
||||
(define* (bake-narinfo+nar cache item
|
||||
#:key ttl (compression %no-compression)
|
||||
(nar-path "/nar"))
|
||||
(nar-path "/nar") ipfs?)
|
||||
"Write the narinfo and nar for ITEM to CACHE."
|
||||
(let* ((compression (actual-compression item compression))
|
||||
(nar (nar-cache-file cache item
|
||||
@@ -502,7 +520,11 @@ requested using POOL."
|
||||
#:nar-path nar-path
|
||||
#:compression compression
|
||||
#:file-size (and=> (stat nar #f)
|
||||
stat:size))
|
||||
stat:size)
|
||||
#:ipfs
|
||||
(and ipfs?
|
||||
(ipfs:content-name
|
||||
(ipfs:add-file-tree item))))
|
||||
port))))))
|
||||
|
||||
;; XXX: Declare the 'X-Nar-Compression' HTTP header, which is in fact for
|
||||
@@ -766,7 +788,8 @@ blocking."
|
||||
cache pool
|
||||
narinfo-ttl
|
||||
(nar-path "nar")
|
||||
(compression %no-compression))
|
||||
(compression %no-compression)
|
||||
ipfs?)
|
||||
(define nar-path?
|
||||
(let ((expected (split-and-decode-uri-path nar-path)))
|
||||
(cut equal? expected <>)))
|
||||
@@ -793,7 +816,8 @@ blocking."
|
||||
#:pool pool
|
||||
#:ttl narinfo-ttl
|
||||
#:nar-path nar-path
|
||||
#:compression compression)
|
||||
#:compression compression
|
||||
#:ipfs? ipfs?)
|
||||
(render-narinfo store request hash
|
||||
#:ttl narinfo-ttl
|
||||
#:nar-path nar-path
|
||||
@@ -847,13 +871,14 @@ blocking."
|
||||
(define* (run-publish-server socket store
|
||||
#:key (compression %no-compression)
|
||||
(nar-path "nar") narinfo-ttl
|
||||
cache pool)
|
||||
cache pool ipfs?)
|
||||
(run-server (make-request-handler store
|
||||
#:cache cache
|
||||
#:pool pool
|
||||
#:nar-path nar-path
|
||||
#:narinfo-ttl narinfo-ttl
|
||||
#:compression compression)
|
||||
#:compression compression
|
||||
#:ipfs? ipfs?)
|
||||
concurrent-http-server
|
||||
`(#:socket ,socket)))
|
||||
|
||||
@@ -902,6 +927,7 @@ blocking."
|
||||
(repl-port (assoc-ref opts 'repl))
|
||||
(cache (assoc-ref opts 'cache))
|
||||
(workers (assoc-ref opts 'workers))
|
||||
(ipfs (assoc-ref opts 'ipfs))
|
||||
|
||||
;; Read the key right away so that (1) we fail early on if we can't
|
||||
;; access them, and (2) we can then drop privileges.
|
||||
@@ -930,14 +956,15 @@ consider using the '--user' option!~%")))
|
||||
(set-thread-name "guix publish")
|
||||
|
||||
(with-store store
|
||||
(run-publish-server socket store
|
||||
#:cache cache
|
||||
#:pool (and cache (make-pool workers
|
||||
#:thread-name
|
||||
"publish worker"))
|
||||
#:nar-path nar-path
|
||||
#:compression compression
|
||||
#:narinfo-ttl ttl))))))
|
||||
(parameterize ((ipfs:%ipfs-base-url ipfs))
|
||||
(run-publish-server socket store
|
||||
#:cache cache
|
||||
#:pool (and cache (make-pool workers
|
||||
#:thread-name
|
||||
"publish worker"))
|
||||
#:nar-path nar-path
|
||||
#:compression compression
|
||||
#:narinfo-ttl ttl)))))))
|
||||
|
||||
;;; Local Variables:
|
||||
;;; eval: (put 'single-baker 'scheme-indent-function 1)
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
#:use-module (guix progress)
|
||||
#:use-module ((guix build syscalls)
|
||||
#:select (set-thread-name))
|
||||
#:use-module ((guix ipfs) #:prefix ipfs:)
|
||||
#:use-module (ice-9 rdelim)
|
||||
#:use-module (ice-9 regex)
|
||||
#:use-module (ice-9 match)
|
||||
@@ -281,7 +282,7 @@ failure, return #f and #f."
|
||||
|
||||
(define-record-type <narinfo>
|
||||
(%make-narinfo path uri uri-base compression file-hash file-size nar-hash nar-size
|
||||
references deriver system signature contents)
|
||||
references deriver system ipfs signature contents)
|
||||
narinfo?
|
||||
(path narinfo-path)
|
||||
(uri narinfo-uri)
|
||||
@@ -294,6 +295,7 @@ failure, return #f and #f."
|
||||
(references narinfo-references)
|
||||
(deriver narinfo-deriver)
|
||||
(system narinfo-system)
|
||||
(ipfs narinfo-ipfs)
|
||||
(signature narinfo-signature) ; canonical sexp
|
||||
;; The original contents of a narinfo file. This field is needed because we
|
||||
;; want to preserve the exact textual representation for verification purposes.
|
||||
@@ -335,7 +337,7 @@ s-expression: ~s~%")
|
||||
"Return a narinfo constructor for narinfos originating from CACHE-URL. STR
|
||||
must contain the original contents of a narinfo file."
|
||||
(lambda (path url compression file-hash file-size nar-hash nar-size
|
||||
references deriver system signature)
|
||||
references deriver system ipfs signature)
|
||||
"Return a new <narinfo> object."
|
||||
(%make-narinfo path
|
||||
;; Handle the case where URL is a relative URL.
|
||||
@@ -352,6 +354,7 @@ must contain the original contents of a narinfo file."
|
||||
((or #f "") #f)
|
||||
(_ deriver))
|
||||
system
|
||||
ipfs
|
||||
(false-if-exception
|
||||
(and=> signature narinfo-signature->canonical-sexp))
|
||||
str)))
|
||||
@@ -386,7 +389,7 @@ No authentication and authorization checks are performed here!"
|
||||
(narinfo-maker str url)
|
||||
'("StorePath" "URL" "Compression"
|
||||
"FileHash" "FileSize" "NarHash" "NarSize"
|
||||
"References" "Deriver" "System"
|
||||
"References" "Deriver" "System" "IPFS"
|
||||
"Signature"))))
|
||||
|
||||
(define (narinfo-sha256 narinfo)
|
||||
@@ -947,13 +950,58 @@ authorized substitutes."
|
||||
(wtf
|
||||
(error "unknown `--query' command" wtf))))
|
||||
|
||||
(define* (process-substitution/http narinfo destination uri
|
||||
#:key print-build-trace?)
|
||||
(unless print-build-trace?
|
||||
(format (current-error-port)
|
||||
(G_ "Downloading ~a...~%") (uri->string uri)))
|
||||
|
||||
(let*-values (((raw download-size)
|
||||
;; Note that Hydra currently generates Nars on the fly
|
||||
;; and doesn't specify a Content-Length, so
|
||||
;; DOWNLOAD-SIZE is #f in practice.
|
||||
(fetch uri #:buffered? #f #:timeout? #f))
|
||||
((progress)
|
||||
(let* ((comp (narinfo-compression narinfo))
|
||||
(dl-size (or download-size
|
||||
(and (equal? comp "none")
|
||||
(narinfo-size narinfo))))
|
||||
(reporter (if print-build-trace?
|
||||
(progress-reporter/trace
|
||||
destination
|
||||
(uri->string uri) dl-size
|
||||
(current-error-port))
|
||||
(progress-reporter/file
|
||||
(uri->string uri) dl-size
|
||||
(current-error-port)
|
||||
#:abbreviation nar-uri-abbreviation))))
|
||||
(progress-report-port reporter raw)))
|
||||
((input pids)
|
||||
;; NOTE: This 'progress' port of current process will be
|
||||
;; closed here, while the child process doing the
|
||||
;; reporting will close it upon exit.
|
||||
(decompressed-port (and=> (narinfo-compression narinfo)
|
||||
string->symbol)
|
||||
progress)))
|
||||
;; Unpack the Nar at INPUT into DESTINATION.
|
||||
(restore-file input destination)
|
||||
(close-port input)
|
||||
|
||||
;; Wait for the reporter to finish.
|
||||
(every (compose zero? cdr waitpid) pids)
|
||||
|
||||
;; Skip a line after what 'progress-reporter/file' printed, and another
|
||||
;; one to visually separate substitutions.
|
||||
(display "\n\n" (current-error-port))))
|
||||
|
||||
(define* (process-substitution store-item destination
|
||||
#:key cache-urls acl print-build-trace?)
|
||||
"Substitute STORE-ITEM (a store file name) from CACHE-URLS, and write it to
|
||||
DESTINATION as a nar file. Verify the substitute against ACL."
|
||||
(let* ((narinfo (lookup-narinfo cache-urls store-item
|
||||
(cut valid-narinfo? <> acl)))
|
||||
(uri (and=> narinfo narinfo-uri)))
|
||||
(uri (and=> narinfo narinfo-uri))
|
||||
(ipfs (and=> narinfo narinfo-ipfs)))
|
||||
(unless uri
|
||||
(leave (G_ "no valid substitute for '~a'~%")
|
||||
store-item))
|
||||
@@ -961,47 +1009,15 @@ DESTINATION as a nar file. Verify the substitute against ACL."
|
||||
;; Tell the daemon what the expected hash of the Nar itself is.
|
||||
(format #t "~a~%" (narinfo-hash narinfo))
|
||||
|
||||
(unless print-build-trace?
|
||||
(format (current-error-port)
|
||||
(G_ "Downloading ~a...~%") (uri->string uri)))
|
||||
|
||||
(let*-values (((raw download-size)
|
||||
;; Note that Hydra currently generates Nars on the fly
|
||||
;; and doesn't specify a Content-Length, so
|
||||
;; DOWNLOAD-SIZE is #f in practice.
|
||||
(fetch uri #:buffered? #f #:timeout? #f))
|
||||
((progress)
|
||||
(let* ((comp (narinfo-compression narinfo))
|
||||
(dl-size (or download-size
|
||||
(and (equal? comp "none")
|
||||
(narinfo-size narinfo))))
|
||||
(reporter (if print-build-trace?
|
||||
(progress-reporter/trace
|
||||
destination
|
||||
(uri->string uri) dl-size
|
||||
(current-error-port))
|
||||
(progress-reporter/file
|
||||
(uri->string uri) dl-size
|
||||
(current-error-port)
|
||||
#:abbreviation nar-uri-abbreviation))))
|
||||
(progress-report-port reporter raw)))
|
||||
((input pids)
|
||||
;; NOTE: This 'progress' port of current process will be
|
||||
;; closed here, while the child process doing the
|
||||
;; reporting will close it upon exit.
|
||||
(decompressed-port (and=> (narinfo-compression narinfo)
|
||||
string->symbol)
|
||||
progress)))
|
||||
;; Unpack the Nar at INPUT into DESTINATION.
|
||||
(restore-file input destination)
|
||||
(close-port input)
|
||||
|
||||
;; Wait for the reporter to finish.
|
||||
(every (compose zero? cdr waitpid) pids)
|
||||
|
||||
;; Skip a line after what 'progress-reporter/file' printed, and another
|
||||
;; one to visually separate substitutions.
|
||||
(display "\n\n" (current-error-port)))))
|
||||
(if ipfs
|
||||
(begin
|
||||
(unless print-build-trace?
|
||||
(format (current-error-port)
|
||||
(G_ "Downloading from IPFS ~s...~%") ipfs))
|
||||
(ipfs:restore-file-tree ipfs destination))
|
||||
(process-substitution/http narinfo destination uri
|
||||
#:print-build-trace?
|
||||
print-build-trace?))))
|
||||
|
||||
|
||||
;;;
|
||||
|
||||
35
guix/swh.scm
35
guix/swh.scm
@@ -23,6 +23,7 @@
|
||||
#:use-module (web client)
|
||||
#:use-module (web response)
|
||||
#:use-module (json)
|
||||
#:use-module (guix json)
|
||||
#:use-module (srfi srfi-1)
|
||||
#:use-module (srfi srfi-9)
|
||||
#:use-module (srfi srfi-11)
|
||||
@@ -127,40 +128,6 @@
|
||||
url
|
||||
(string-append url "/")))
|
||||
|
||||
(define-syntax-rule (define-json-reader json->record ctor spec ...)
|
||||
"Define JSON->RECORD as a procedure that converts a JSON representation,
|
||||
read from a port, string, or hash table, into a record created by CTOR and
|
||||
following SPEC, a series of field specifications."
|
||||
(define (json->record input)
|
||||
(let ((table (cond ((port? input)
|
||||
(json->scm input))
|
||||
((string? input)
|
||||
(json-string->scm input))
|
||||
((hash-table? input)
|
||||
input))))
|
||||
(let-syntax ((extract-field (syntax-rules ()
|
||||
((_ table (field key json->value))
|
||||
(json->value (hash-ref table key)))
|
||||
((_ table (field key))
|
||||
(hash-ref table key))
|
||||
((_ table (field))
|
||||
(hash-ref table
|
||||
(symbol->string 'field))))))
|
||||
(ctor (extract-field table spec) ...)))))
|
||||
|
||||
(define-syntax-rule (define-json-mapping rtd ctor pred json->record
|
||||
(field getter spec ...) ...)
|
||||
"Define RTD as a record type with the given FIELDs and GETTERs, à la SRFI-9,
|
||||
and define JSON->RECORD as a conversion from JSON to a record of this type."
|
||||
(begin
|
||||
(define-record-type rtd
|
||||
(ctor field ...)
|
||||
pred
|
||||
(field getter) ...)
|
||||
|
||||
(define-json-reader json->record ctor
|
||||
(field spec ...) ...)))
|
||||
|
||||
(define %date-regexp
|
||||
;; Match strings like "2014-11-17T22:09:38+01:00" or
|
||||
;; "2018-09-30T23:20:07.815449+00:00"".
|
||||
|
||||
@@ -26,9 +26,12 @@
|
||||
#:use-module (gcrypt hash)
|
||||
#:use-module (guix build-system gnu)
|
||||
#:use-module (gnu packages bootstrap)
|
||||
#:use-module (srfi srfi-1)
|
||||
#:use-module (srfi srfi-26)
|
||||
#:use-module (srfi srfi-34)
|
||||
#:use-module (srfi srfi-64)
|
||||
#:use-module (rnrs bytevectors)
|
||||
#:use-module (ice-9 ftw)
|
||||
#:use-module (ice-9 binary-ports)
|
||||
#:use-module (web uri)
|
||||
#:export (open-connection-for-tests
|
||||
@@ -138,16 +141,31 @@ too expensive to build entirely in the test store."
|
||||
(loop (1+ i)))
|
||||
bv))))
|
||||
|
||||
(define (not-dot? entry)
|
||||
(not (member entry '("." ".."))))
|
||||
|
||||
(define (file=? a b)
|
||||
"Return true if files A and B have the same type and same content."
|
||||
"Return true if files A and B have the same type and same content,
|
||||
recursively."
|
||||
(define (executable? file)
|
||||
(->bool (logand (stat:mode (lstat file)) #o100)))
|
||||
|
||||
(and (eq? (stat:type (lstat a)) (stat:type (lstat b)))
|
||||
(case (stat:type (lstat a))
|
||||
((regular)
|
||||
(equal?
|
||||
(call-with-input-file a get-bytevector-all)
|
||||
(call-with-input-file b get-bytevector-all)))
|
||||
(and (eqv? (executable? a) (executable? b))
|
||||
(equal?
|
||||
(call-with-input-file a get-bytevector-all)
|
||||
(call-with-input-file b get-bytevector-all))))
|
||||
((symlink)
|
||||
(string=? (readlink a) (readlink b)))
|
||||
((directory)
|
||||
(let ((lst1 (scandir a not-dot?))
|
||||
(lst2 (scandir b not-dot?)))
|
||||
(and (equal? lst1 lst2)
|
||||
(every file=?
|
||||
(map (cut string-append a "/" <>) lst1)
|
||||
(map (cut string-append b "/" <>) lst2)))))
|
||||
(else
|
||||
(error "what?" (lstat a))))))
|
||||
|
||||
|
||||
55
tests/ipfs.scm
Normal file
55
tests/ipfs.scm
Normal file
@@ -0,0 +1,55 @@
|
||||
;;; GNU Guix --- Functional package management for GNU
|
||||
;;; Copyright © 2018 Ludovic Courtès <ludo@gnu.org>
|
||||
;;;
|
||||
;;; This file is part of GNU Guix.
|
||||
;;;
|
||||
;;; GNU Guix is free software; you can redistribute it and/or modify it
|
||||
;;; under the terms of the GNU General Public License as published by
|
||||
;;; the Free Software Foundation; either version 3 of the License, or (at
|
||||
;;; your option) any later version.
|
||||
;;;
|
||||
;;; GNU Guix is distributed in the hope that it will be useful, but
|
||||
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
;;; GNU General Public License for more details.
|
||||
;;;
|
||||
;;; You should have received a copy of the GNU General Public License
|
||||
;;; along with GNU Guix. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
;; Test module for (guix ipfs); tests are skipped when no local
;; IPFS gateway is reachable.
(define-module (test-ipfs)
  #:use-module (guix ipfs)
  #:use-module ((guix utils) #:select (call-with-temporary-directory))
  #:use-module (guix tests)
  #:use-module (web uri)
  #:use-module (srfi srfi-64))
|
||||
|
||||
;; Test the (guix ipfs) module.
|
||||
|
||||
(define (ipfs-gateway-running?)
  "Return true if the IPFS gateway is running at %IPFS-BASE-URL."
  ;; NOTE(review): assumes %IPFS-BASE-URL carries an explicit port (it does by
  ;; default: localhost:5001); 'uri-port' would be #f otherwise.
  (let* ((uri  (string->uri (%ipfs-base-url)))
         (sock (socket AF_INET SOCK_STREAM 0)))
    ;; Probe by attempting a TCP connection to the loopback interface;
    ;; any 'system-error (e.g. ECONNREFUSED) means "not running".
    (define reachable?
      (catch 'system-error
        (lambda ()
          (format (current-error-port)
                  "probing IPFS gateway at localhost:~a...~%"
                  (uri-port uri))
          (connect sock AF_INET INADDR_LOOPBACK (uri-port uri))
          #t)
        (const #f)))
    (close-port sock)
    reachable?))
|
||||
|
||||
;; Skip the round-trip test when no gateway is listening locally.
(unless (ipfs-gateway-running?)
  (test-skip 1))

(test-assert "add-file-tree + restore-file-tree"
  (call-with-temporary-directory
   (lambda (tmpdir)
     ;; Round-trip the directory containing guix/base32.scm through IPFS
     ;; and compare the restored copy against the original.
     (let* ((source  (dirname (search-path %load-path "guix/base32.scm")))
            (target  (string-append tmpdir "/r"))
            (content (pk 'content (add-file-tree source))))
       (restore-file-tree (content-name content) target)
       (file=? source target)))))
|
||||
Reference in New Issue
Block a user